diff --git a/Makefile b/Makefile index 8f0f8c6508..2716d8b12f 100644 --- a/Makefile +++ b/Makefile @@ -195,7 +195,6 @@ proto: protoc helper/storagepacker/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/forwarding/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc sdk/logical/*.proto --go_out=plugins=grpc,paths=source_relative:. - protoc sdk/physical/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc physical/raft/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/mfa/types.proto --go_out=plugins=grpc,paths=source_relative:. protoc helper/identity/types.proto --go_out=plugins=grpc,paths=source_relative:. @@ -203,7 +202,6 @@ proto: protoc sdk/plugin/pb/*.proto --go_out=plugins=grpc,paths=source_relative:. sed -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/protobuf:"/sentinel:"" protobuf:"/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go - sed -i -e 's/Iv/IV/' -e 's/Hmac/HMAC/' sdk/physical/types.pb.go fmtcheck: @true diff --git a/api/go.mod b/api/go.mod index 5b54ad919f..06a826193d 100644 --- a/api/go.mod +++ b/api/go.mod @@ -14,7 +14,7 @@ require ( github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/vault/sdk v0.1.14-0.20191218213202-9caafff72a1f github.com/mitchellh/mapstructure v1.1.2 - golang.org/x/net v0.0.0-20190620200207-3b0461eec859 + golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 gopkg.in/square/go-jose.v2 v2.3.1 ) diff --git a/api/go.sum b/api/go.sum index 24a9113343..17b39e99be 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,8 +1,32 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks 
v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= @@ -11,6 +35,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -18,13 +44,23 @@ github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkPro github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -33,9 +69,11 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.10.1 h1:uyt/l0dWjJ879yiAu+T7FG3/6QX+zwm4bQ8P7XsYt3o= github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= @@ -53,8 +91,15 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -73,7 +118,10 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -88,26 +136,39 @@ github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -118,26 +179,46 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= 
-golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= 
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index 8c52b4b794..352bb14812 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -10,8 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" - "github.com/hashicorp/vault/helper/awsutil" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/awsutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" cache "github.com/patrickmn/go-cache" @@ -255,14 +255,14 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } iamClient, err := b.clientIAM(ctx, s, region.ID(), entity.AccountNumber) if err != nil { - return "", awsutil.AppendLogicalError(err) + return "", awsutil.AppendAWSError(err) } switch entity.Type { case "user": userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName}) if err != nil { - return "", awsutil.AppendLogicalError(err) + return "", awsutil.AppendAWSError(err) } if userInfo == nil { return "", fmt.Errorf("got nil result from GetUser") @@ -271,7 +271,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag case "role": roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName}) if err != nil { - return "", awsutil.AppendLogicalError(err) + return "", awsutil.AppendAWSError(err) } if roleInfo == nil { return "", fmt.Errorf("got nil result from GetRole") @@ -280,7 +280,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag case "instance-profile": profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) if err != nil { - return "", awsutil.AppendLogicalError(err) + return "", awsutil.AppendAWSError(err) } if profileInfo == nil { return "", fmt.Errorf("got nil result from GetInstanceProfile") diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go index eb4659f90a..7096f6cb4f 100644 --- a/builtin/credential/aws/cli.go +++ b/builtin/credential/aws/cli.go @@ -13,9 +13,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/awsutil" + "github.com/hashicorp/vault/sdk/helper/awsutil" ) type CLIHandler struct{} @@ -39,7 +38,10 @@ func GenerateLoginData(creds *credentials.Credentials, headerValue, configuredRe loginData := make(map[string]interface{}) // Use the credentials we've found to construct an STS session - region := awsutil.GetOrDefaultRegion(hclog.Default(), configuredRegion) + region, err := awsutil.GetRegion(configuredRegion) + if err != nil { + return nil, err + } stsSession, err := session.NewSessionWithOptions(session.Options{ Config: aws.Config{ Credentials: creds, diff --git 
a/builtin/credential/aws/client.go b/builtin/credential/aws/client.go index e495dd2579..547d65a0dd 100644 --- a/builtin/credential/aws/client.go +++ b/builtin/credential/aws/client.go @@ -12,7 +12,7 @@ import ( "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/vault/helper/awsutil" + "github.com/hashicorp/vault/sdk/helper/awsutil" "github.com/hashicorp/vault/sdk/logical" ) diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index 6725617a8e..8cf7552f31 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -22,8 +22,8 @@ import ( "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/awsutil" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/awsutil" "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/strutil" @@ -137,7 +137,7 @@ func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName str InstanceProfileName: aws.String(instanceProfileName), }) if err != nil { - return "", awsutil.AppendLogicalError(err) + return "", awsutil.AppendAWSError(err) } if profile == nil { return "", fmt.Errorf("nil output while getting instance profile details") diff --git a/builtin/credential/aws/path_role_test.go b/builtin/credential/aws/path_role_test.go index 36118fa838..eb6e8c209a 100644 --- a/builtin/credential/aws/path_role_test.go +++ b/builtin/credential/aws/path_role_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/go-test/deep" - "github.com/hashicorp/vault/helper/awsutil" vlttesting "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/awsutil" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" diff --git a/builtin/logical/aws/client.go b/builtin/logical/aws/client.go index f37ce10082..4379f3ab37 100644 --- a/builtin/logical/aws/client.go +++ b/builtin/logical/aws/client.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/vault/helper/awsutil" + "github.com/hashicorp/vault/sdk/helper/awsutil" "github.com/hashicorp/vault/sdk/logical" ) diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go index c512aaa4f7..3619819626 100644 --- a/builtin/logical/aws/secret_access_keys.go +++ b/builtin/logical/aws/secret_access_keys.go @@ -11,8 +11,8 @@ import ( "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/helper/awsutil" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/awsutil" "github.com/hashicorp/vault/sdk/logical" ) diff --git a/command/agent/auth/aws/aws.go b/command/agent/auth/aws/aws.go index e1eb459710..1322d76795 100644 --- a/command/agent/auth/aws/aws.go +++ b/command/agent/auth/aws/aws.go @@ -19,7 +19,7 @@ import ( "github.com/hashicorp/vault/api" awsauth "github.com/hashicorp/vault/builtin/credential/aws" "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/helper/awsutil" + "github.com/hashicorp/vault/sdk/helper/awsutil" ) const ( diff --git 
a/command/audit_enable_test.go b/command/audit_enable_test.go index bea8c32233..c43a812758 100644 --- a/command/audit_enable_test.go +++ b/command/audit_enable_test.go @@ -2,6 +2,7 @@ package command import ( "io/ioutil" + "os" "strings" "testing" @@ -189,6 +190,11 @@ func TestAuditEnableCommand_Run(t *testing.T) { args = append(args, "file_path=discard") case "socket": args = append(args, "address=127.0.0.1:8888") + case "syslog": + if _, exists := os.LookupEnv("WSLENV"); exists { + t.Log("skipping syslog test on WSL") + continue + } } code := cmd.Run(args) if exp := 0; code != exp { diff --git a/command/seal_migration_test.go b/command/seal_migration_test.go index 008783d8ba..ca1d5693cf 100644 --- a/command/seal_migration_test.go +++ b/command/seal_migration_test.go @@ -8,6 +8,8 @@ import ( "testing" hclog "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" @@ -16,7 +18,6 @@ import ( "github.com/hashicorp/vault/shamir" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" - shamirseal "github.com/hashicorp/vault/vault/seal/shamir" ) func TestSealMigration(t *testing.T) { @@ -29,9 +30,13 @@ func TestSealMigration(t *testing.T) { if err != nil { t.Fatal(err) } - shamirSeal := vault.NewDefaultSeal(shamirseal.NewSeal(logger.Named("shamir"))) + shamirwrapper := vault.NewDefaultSeal(&seal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: logger.Named("shamir"), + }), + }) coreConfig := &vault.CoreConfig{ - Seal: shamirSeal, + Seal: shamirwrapper, Physical: phys, HAPhysical: haPhys.(physical.HABackend), DisableSealWrap: true, @@ -201,7 +206,7 @@ func TestSealMigration(t *testing.T) { } altTestSeal := seal.NewTestSeal(nil) - altTestSeal.Type = "test-alternate" + altTestSeal.SetType("test-alternate") altSeal := vault.NewAutoSeal(altTestSeal) { @@ -252,7 +257,7 @@ func TestSealMigration(t *testing.T) { core := cluster.Cores[0].Core - if err := adjustCoreForSealMigration(logger, core, shamirSeal, altSeal); err != nil { + if err := adjustCoreForSealMigration(logger, core, shamirwrapper, altSeal); err != nil { t.Fatal(err) } @@ -288,7 +293,7 @@ func TestSealMigration(t *testing.T) { { logger.SetLevel(hclog.Trace) logger.Info("integ: verify autoseal is off and the expected key shares work") - coreConfig.Seal = shamirSeal + coreConfig.Seal = shamirwrapper cluster := vault.NewTestCluster(t, coreConfig, clusterConfig) cluster.Start() defer cluster.Cleanup() diff --git a/command/server.go b/command/server.go index 8ee5423594..add994b7dd 100644 --- a/command/server.go +++ b/command/server.go @@ -28,6 +28,8 @@ import ( stackdriver "github.com/google/go-metrics-stackdriver" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/audit" @@ -50,7 +52,6 @@ import ( sr "github.com/hashicorp/vault/serviceregistration" "github.com/hashicorp/vault/vault" vaultseal "github.com/hashicorp/vault/vault/seal" - shamirseal "github.com/hashicorp/vault/vault/seal/shamir" "github.com/mitchellh/cli" "github.com/mitchellh/go-testing-interface" "github.com/posener/complete" @@ -488,7 +489,7 @@ func (c *ServerCommand) runRecoveryMode() int 
{ var sealConfigError error if len(config.Seals) == 0 { - config.Seals = append(config.Seals, &server.Seal{Type: vaultseal.Shamir}) + config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir}) } if len(config.Seals) > 1 { @@ -497,7 +498,7 @@ func (c *ServerCommand) runRecoveryMode() int { } configSeal := config.Seals[0] - sealType := vaultseal.Shamir + sealType := wrapping.Shamir if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { sealType = os.Getenv("VAULT_SEAL_TYPE") configSeal.Type = sealType @@ -507,7 +508,11 @@ func (c *ServerCommand) runRecoveryMode() int { var seal vault.Seal sealLogger := c.logger.Named(sealType) - seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("shamir")))) + seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("shamir"), + }), + })) if sealConfigError != nil { if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { c.UI.Error(fmt.Sprintf( @@ -971,16 +976,16 @@ func (c *ServerCommand) Run(args []string) int { // Handle the case where no seal is provided switch len(config.Seals) { case 0: - config.Seals = append(config.Seals, &server.Seal{Type: vaultseal.Shamir}) + config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir}) case 1: // If there's only one seal and it's disabled assume they want to // migrate to a shamir seal and simply didn't provide it if config.Seals[0].Disabled { - config.Seals = append(config.Seals, &server.Seal{Type: vaultseal.Shamir}) + config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir}) } } for _, configSeal := range config.Seals { - sealType := vaultseal.Shamir + sealType := wrapping.Shamir if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { sealType = os.Getenv("VAULT_SEAL_TYPE") configSeal.Type = sealType @@ -991,7 +996,11 @@ func (c *ServerCommand) Run(args []string) int { var seal vault.Seal sealLogger := c.logger.Named(sealType) allLoggers = append(allLoggers, sealLogger) - seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("shamir")))) + seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("shamir"), + }), + })) if sealConfigError != nil { if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { c.UI.Error(fmt.Sprintf( @@ -1847,7 +1856,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig } } - if core.SealAccess().StoredKeysSupported() != vault.StoredKeysNotSupported { + if core.SealAccess().StoredKeysSupported() != vaultseal.StoredKeysNotSupported { barrierConfig.StoredShares = 1 } @@ -1861,7 +1870,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig } // Handle unseal with stored keys - if core.SealAccess().StoredKeysSupported() == vault.StoredKeysSupportedGeneric { + if core.SealAccess().StoredKeysSupported() == vaultseal.StoredKeysSupportedGeneric { err := core.UnsealWithStoredKeys(ctx) if err != nil { return nil, err diff --git a/command/server/seal/server_seal.go b/command/server/seal/server_seal.go index 21af6d4106..9cdb3c581e 100644 --- 
a/command/server/seal/server_seal.go +++ b/command/server/seal/server_seal.go @@ -3,40 +3,40 @@ package seal import ( "fmt" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal" ) var ( ConfigureSeal = configureSeal ) -func configureSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (outseal vault.Seal, err error) { +func configureSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (outseal vault.Seal, err error) { switch configSeal.Type { - case seal.AliCloudKMS: + case wrapping.AliCloudKMS: return configureAliCloudKMSSeal(configSeal, infoKeys, info, logger, inseal) - case seal.AWSKMS: + case wrapping.AWSKMS: return configureAWSKMSSeal(configSeal, infoKeys, info, logger, inseal) - case seal.GCPCKMS: - return configureGCPCKMSSeal(configSeal, infoKeys, info, logger, inseal) - - case seal.AzureKeyVault: + case wrapping.AzureKeyVault: return configureAzureKeyVaultSeal(configSeal, infoKeys, info, logger, inseal) - case seal.OCIKMS: + case wrapping.GCPCKMS: + return configureGCPCKMSSeal(configSeal, infoKeys, info, logger, inseal) + + case wrapping.OCIKMS: return configureOCIKMSSeal(configSeal, infoKeys, info, logger, inseal) - case seal.Transit: + case wrapping.Transit: return configureTransitSeal(configSeal, infoKeys, info, logger, inseal) - case seal.PKCS11: + case wrapping.PKCS11: return nil, fmt.Errorf("Seal type 'pkcs11' requires the Vault Enterprise HSM binary") - case seal.Shamir: + case wrapping.Shamir: return inseal, nil default: diff --git a/command/server/seal/server_seal_alicloudkms.go b/command/server/seal/server_seal_alicloudkms.go index 6d20da7a58..269aab56da 100644 --- a/command/server/seal/server_seal_alicloudkms.go +++ b/command/server/seal/server_seal_alicloudkms.go @@ -3,14 +3,15 @@ package seal import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal/alicloudkms" + "github.com/hashicorp/vault/vault/seal" ) func configureAliCloudKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) { - kms := alicloudkms.NewSeal(logger) + kms := alicloudkms.NewWrapper(nil) kmsInfo, err := kms.SetConfig(configSeal.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error @@ -18,7 +19,9 @@ func configureAliCloudKMSSeal(configSeal *server.Seal, infoKeys *[]string, info return nil, err } } - autoseal := vault.NewAutoSeal(kms) + autoseal := vault.NewAutoSeal(&seal.Access{ + Wrapper: kms, + }) if kmsInfo != nil { *infoKeys = append(*infoKeys, "Seal Type", "AliCloud KMS Region", "AliCloud KMS KeyID") (*info)["Seal Type"] = configSeal.Type diff --git a/command/server/seal/server_seal_awskms.go b/command/server/seal/server_seal_awskms.go index f0cefcefd7..2d5b71ea58 100644 --- a/command/server/seal/server_seal_awskms.go +++ b/command/server/seal/server_seal_awskms.go @@ -2,15 +2,16 @@ package seal import ( "github.com/hashicorp/errwrap" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" + 
"github.com/hashicorp/go-kms-wrapping/wrappers/awskms" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal/awskms" + "github.com/hashicorp/vault/vault/seal" ) -func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) { - kms := awskms.NewSeal(logger) +func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (vault.Seal, error) { + kms := awskms.NewWrapper(nil) kmsInfo, err := kms.SetConfig(configSeal.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error @@ -18,7 +19,9 @@ func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[ return nil, err } } - autoseal := vault.NewAutoSeal(kms) + autoseal := vault.NewAutoSeal(&seal.Access{ + Wrapper: kms, + }) if kmsInfo != nil { *infoKeys = append(*infoKeys, "Seal Type", "AWS KMS Region", "AWS KMS KeyID") (*info)["Seal Type"] = configSeal.Type diff --git a/command/server/seal/server_seal_azurekeyvault.go b/command/server/seal/server_seal_azurekeyvault.go index 443b8dc708..4b26c29ea6 100644 --- a/command/server/seal/server_seal_azurekeyvault.go +++ b/command/server/seal/server_seal_azurekeyvault.go @@ -3,14 +3,15 @@ package seal import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal/azurekeyvault" + "github.com/hashicorp/vault/vault/seal" ) func configureAzureKeyVaultSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) { - kv := azurekeyvault.NewSeal(logger) + kv := azurekeyvault.NewWrapper(nil) kvInfo, err := kv.SetConfig(configSeal.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error @@ -18,7 +19,9 @@ func configureAzureKeyVaultSeal(configSeal *server.Seal, infoKeys *[]string, inf return nil, err } } - autoseal := vault.NewAutoSeal(kv) + autoseal := vault.NewAutoSeal(&seal.Access{ + Wrapper: kv, + }) if kvInfo != nil { *infoKeys = append(*infoKeys, "Seal Type", "Azure Environment", "Azure Vault Name", "Azure Key Name") (*info)["Seal Type"] = configSeal.Type diff --git a/command/server/seal/server_seal_gcpckms.go b/command/server/seal/server_seal_gcpckms.go index 51f9b06cf3..9c0219a82f 100644 --- a/command/server/seal/server_seal_gcpckms.go +++ b/command/server/seal/server_seal_gcpckms.go @@ -3,14 +3,17 @@ package seal import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms" "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/sdk/helper/useragent" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal/gcpckms" + "github.com/hashicorp/vault/vault/seal" ) func configureGCPCKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) { - kms := gcpckms.NewSeal(logger) + kms := gcpckms.NewWrapper(nil) + configSeal.Config["user_agent"] = useragent.String() kmsInfo, err := kms.SetConfig(configSeal.Config) if 
err != nil { // If the error is any other than logical.KeyNotFoundError, return the error @@ -18,7 +21,9 @@ func configureGCPCKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map return nil, err } } - autoseal := vault.NewAutoSeal(kms) + autoseal := vault.NewAutoSeal(&seal.Access{ + Wrapper: kms, + }) if kmsInfo != nil { *infoKeys = append(*infoKeys, "Seal Type", "GCP KMS Project", "GCP KMS Region", "GCP KMS Key Ring", "GCP KMS Crypto Key") (*info)["Seal Type"] = configSeal.Type diff --git a/command/server/seal/server_seal_ocikms.go b/command/server/seal/server_seal_ocikms.go index ef94989104..7c4ff26c97 100644 --- a/command/server/seal/server_seal_ocikms.go +++ b/command/server/seal/server_seal_ocikms.go @@ -3,23 +3,29 @@ package seal import ( log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/wrappers/ocikms" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal/ocikms" + "github.com/hashicorp/vault/vault/seal" ) func configureOCIKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) { - kms := ocikms.NewSeal(logger) + kms := ocikms.NewWrapper(nil) kmsInfo, err := kms.SetConfig(configSeal.Config) if err != nil { logger.Error("error on setting up config for OCI KMS", "error", err) return nil, err } - autoseal := vault.NewAutoSeal(kms) + autoseal := vault.NewAutoSeal(&seal.Access{ + Wrapper: kms, + }) if kmsInfo != nil { *infoKeys = append(*infoKeys, "Seal Type", "OCI KMS KeyID") (*info)["Seal Type"] = configSeal.Type - (*info)["OCI KMS KeyID"] = kmsInfo["key_id"] + (*info)["OCI KMS KeyID"] = kmsInfo[ocikms.KMSConfigKeyID] + (*info)["OCI KMS Crypto Endpoint"] = kmsInfo[ocikms.KMSConfigCryptoEndpoint] + (*info)["OCI KMS Management Endpoint"] = kmsInfo[ocikms.KMSConfigManagementEndpoint] + (*info)["OCI KMS Principal Type"] = kmsInfo["principal_type"] } return autoseal, nil } diff --git a/command/server/seal/server_seal_transit.go b/command/server/seal/server_seal_transit.go index 8382901651..0a9bc1e7a6 100644 --- a/command/server/seal/server_seal_transit.go +++ b/command/server/seal/server_seal_transit.go @@ -3,14 +3,18 @@ package seal import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + "github.com/hashicorp/go-kms-wrapping/wrappers/transit" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal/transit" + "github.com/hashicorp/vault/vault/seal" ) func configureTransitSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) { - transitSeal := transit.NewSeal(logger) + transitSeal := transit.NewWrapper(&wrapping.WrapperOptions{ + Logger: logger.ResetNamed("seal-transit"), + }) sealInfo, err := transitSeal.SetConfig(configSeal.Config) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error @@ -18,7 +22,9 @@ func configureTransitSeal(configSeal *server.Seal, infoKeys *[]string, info *map return nil, err } } - autoseal := vault.NewAutoSeal(transitSeal) + autoseal := vault.NewAutoSeal(&seal.Access{ + Wrapper: transitSeal, + }) if sealInfo != nil { *infoKeys = append(*infoKeys, "Seal Type", "Transit Address", "Transit Mount Path", "Transit Key Name") (*info)["Seal Type"] = configSeal.Type diff --git 
a/vault/seal/transit/transit_acc_test.go b/command/server/seal/server_seal_transit_acc_test.go similarity index 79% rename from vault/seal/transit/transit_acc_test.go rename to command/server/seal/server_seal_transit_acc_test.go index 11896b76b8..5b469c0410 100644 --- a/vault/seal/transit/transit_acc_test.go +++ b/command/server/seal/server_seal_transit_acc_test.go @@ -1,4 +1,4 @@ -package transit_test +package seal_test import ( "context" @@ -6,45 +6,43 @@ import ( "os" "path" "reflect" + "strings" "testing" "time" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/wrappers/transit" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/docker" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/vault/seal/transit" "github.com/ory/dockertest" ) -func TestTransitSeal_Lifecycle(t *testing.T) { +func TestTransitWrapper_Lifecycle(t *testing.T) { if os.Getenv("VAULT_ACC") == "" { t.Skip() } cleanup, retAddress, token, mountPath, keyName, _ := prepareTestContainer(t) defer cleanup() - sealConfig := map[string]string{ + wrapperConfig := map[string]string{ "address": retAddress, "token": token, "mount_path": mountPath, "key_name": keyName, } - s := transit.NewSeal(logging.NewVaultLogger(log.Trace)) - _, err := s.SetConfig(sealConfig) + s := transit.NewWrapper(nil) + _, err := s.SetConfig(wrapperConfig) if err != nil { - t.Fatalf("error setting seal config: %v", err) + t.Fatalf("error setting wrapper config: %v", err) } // Test Encrypt and Decrypt calls input := []byte("foo") - swi, err := s.Encrypt(context.Background(), input) + swi, err := s.Encrypt(context.Background(), input, nil) if err != nil { t.Fatalf("err: %s", err.Error()) } - pt, err := s.Decrypt(context.Background(), swi) + pt, err := s.Decrypt(context.Background(), swi, nil) if err != nil { t.Fatalf("err: %s", err.Error()) } @@ -82,28 +80,28 @@ func TestTransitSeal_TokenRenewal(t *testing.T) { t.Fatalf("err: %s", err) } - sealConfig := map[string]string{ + wrapperConfig := map[string]string{ "address": retAddress, "token": rsp.Auth.ClientToken, "mount_path": mountPath, "key_name": keyName, } - s := transit.NewSeal(logging.NewVaultLogger(log.Trace)) - _, err = s.SetConfig(sealConfig) + s := transit.NewWrapper(nil) + _, err = s.SetConfig(wrapperConfig) if err != nil { - t.Fatalf("error setting seal config: %v", err) + t.Fatalf("error setting wrapper config: %v", err) } time.Sleep(7 * time.Second) // Test Encrypt and Decrypt calls input := []byte("foo") - swi, err := s.Encrypt(context.Background(), input) + swi, err := s.Encrypt(context.Background(), input, nil) if err != nil { t.Fatalf("err: %s", err.Error()) } - pt, err := s.Decrypt(context.Background(), swi) + pt, err := s.Decrypt(context.Background(), swi, nil) if err != nil { t.Fatalf("err: %s", err.Error()) } @@ -144,7 +142,19 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress, token, moun } cleanup = func() { - docker.CleanupResource(t, pool, resource) + var err error + for i := 0; i < 10; i++ { + err = pool.Purge(resource) + if err == nil { + return + } + time.Sleep(1 * time.Second) + } + + if strings.Contains(err.Error(), "No such container") { + return + } + t.Fatalf("Failed to cleanup local container: %s", err) } retAddress = fmt.Sprintf("http://127.0.0.1:%s", resource.GetPort("8200/tcp")) diff --git a/command/server_util.go b/command/server_util.go index ba0d746224..0098dfbf58 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ 
-7,10 +7,11 @@ import ( "io" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/vault" vaultseal "github.com/hashicorp/vault/vault/seal" - shamirseal "github.com/hashicorp/vault/vault/seal/shamir" "github.com/pkg/errors" ) @@ -31,7 +32,7 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal // If we don't have an existing config or if it's the deprecated auto seal // which needs an upgrade, skip out - if existBarrierSealConfig == nil || existBarrierSealConfig.Type == vaultseal.HSMAutoDeprecated { + if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated { return nil } @@ -44,11 +45,11 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal // If we're not coming from Shamir, and the existing type doesn't match // the barrier type, we need both the migration seal and the new seal - if existBarrierSealConfig.Type != vaultseal.Shamir && barrierSeal.BarrierType() != vaultseal.Shamir { + if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir { return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`) } } else { - if unwrapSeal.BarrierType() == vaultseal.Shamir { + if unwrapSeal.BarrierType() == wrapping.Shamir { return errors.New("Shamir seals cannot be set disabled (they should simply not be set)") } } @@ -64,14 +65,18 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal return nil } - if existBarrierSealConfig.Type != vaultseal.Shamir && existRecoverySealConfig == nil { + if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil { return errors.New(`Recovery seal configuration not found for existing seal`) } switch existBarrierSealConfig.Type { - case vaultseal.Shamir: + case wrapping.Shamir: // The value reflected in config is what we're going to - existSeal = vault.NewDefaultSeal(shamirseal.NewSeal(logger.Named("shamir"))) + existSeal = vault.NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: logger.Named("shamir"), + }), + }) newSeal = barrierSeal newBarrierSealConfig := &vault.SealConfig{ Type: newSeal.BarrierType(), @@ -83,7 +88,7 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal newSeal.SetCachedRecoveryConfig(existBarrierSealConfig) default: - if onEnterprise && barrierSeal.BarrierType() == vaultseal.Shamir { + if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir { return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise") } diff --git a/go.mod b/go.mod index c1b814ecd0..442eed0f7c 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,8 @@ replace github.com/hashicorp/vault/sdk => ./sdk require ( cloud.google.com/go v0.39.0 - github.com/Azure/azure-sdk-for-go v29.0.0+incompatible - github.com/Azure/go-autorest v11.7.1+incompatible + github.com/Azure/azure-sdk-for-go v36.2.0+incompatible + github.com/Azure/go-autorest/autorest v0.9.2 github.com/NYTimes/gziphandler v1.1.1 github.com/SAP/go-hdb v0.14.1 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect @@ -28,7 +28,6 @@ require ( github.com/cockroachdb/apd v1.1.0 // indirect github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c github.com/coreos/go-semver 
v0.2.0 - github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d // indirect github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a github.com/dnaeon/go-vcr v1.0.1 // indirect github.com/dsnet/compress v0.0.1 // indirect @@ -48,13 +47,14 @@ require ( github.com/golang/protobuf v1.3.2 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a - github.com/google/go-querystring v1.0.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect github.com/hashicorp/consul-template v0.22.0 github.com/hashicorp/consul/api v1.1.0 github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-gcp-common v0.5.0 github.com/hashicorp/go-hclog v0.10.1 + github.com/hashicorp/go-kms-wrapping v0.0.0-20191229213738-edc2c6e9ee1d github.com/hashicorp/go-memdb v1.0.2 github.com/hashicorp/go-msgpack v0.5.5 github.com/hashicorp/go-multierror v1.0.0 @@ -69,7 +69,7 @@ require ( github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17 github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec - github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190814210035-08e00d801115 + github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20191119151105-86f21fbc96e3 github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190814210042-090ec2ed93ce github.com/hashicorp/vault-plugin-auth-cf v0.0.0-20190821162840-1c2205826fee github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190930204802-acfd134850c2 @@ -80,12 +80,12 @@ require ( github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190814210117-e079e01fbb93 github.com/hashicorp/vault-plugin-secrets-ad v0.6.2 github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190814210129-4d18bec92f56 - github.com/hashicorp/vault-plugin-secrets-azure v0.5.2 - github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191112195538-3c798536d157 + github.com/hashicorp/vault-plugin-secrets-azure v0.5.3-0.20191119150734-45c076c82f1d + github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191119222840-524d5b57ed36 github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190814210149-315cdbf5de6e github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20191017213228-e8cf7060a4d0 github.com/hashicorp/vault/api v1.0.5-0.20191218213558-0bc25f908162 - github.com/hashicorp/vault/sdk v0.1.14-0.20191218213202-9caafff72a1f + github.com/hashicorp/vault/sdk v0.1.14-0.20191229212425-c478d00be0d6 github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/pgx v3.3.0+incompatible // indirect @@ -94,7 +94,6 @@ require ( github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869 github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f github.com/kr/pretty v0.1.0 - github.com/kr/pty v1.1.3 // indirect github.com/kr/text v0.1.0 github.com/lib/pq v1.2.0 github.com/mattn/go-colorable v0.1.4 @@ -109,7 +108,7 @@ require ( github.com/ncw/swift v1.0.47 github.com/nwaples/rardecode v1.0.0 // indirect github.com/oklog/run v1.0.0 - github.com/oracle/oci-go-sdk v7.0.0+incompatible + github.com/oracle/oci-go-sdk v12.5.0+incompatible github.com/ory/dockertest v3.3.5+incompatible github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.8.1 @@ -130,9 +129,8 @@ require ( go.uber.org/atomic v1.4.0 golang.org/x/crypto 
v0.0.0-20191106202628-ed6320f186d4 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 - golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a - google.golang.org/api v0.5.0 - google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + google.golang.org/api v0.14.0 google.golang.org/grpc v1.22.0 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/ory-am/dockertest.v3 v3.3.4 diff --git a/go.sum b/go.sum index 21961d4da1..0c3d6cc0dc 100644 --- a/go.sum +++ b/go.sum @@ -2,25 +2,46 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= -cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.39.0 h1:UgQP9na6OTfp4dsAiz/eFpFA1C6tPdH5wiRdi19tuMw= cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible h1:CYPU39ULbGjQBo3gXIqiWouK0C4F+Pt2Zx5CqGvqknE= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q= -github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= 
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -44,7 +65,6 @@ github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KM github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6 h1:5RwdKFlGKokYBbq4M2ZZ0LzfxdK4e1L4rwQH+76wPkE= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= @@ -55,9 +75,7 @@ github.com/apache/thrift 
v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= @@ -68,6 +86,7 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdXw= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= @@ -80,7 +99,6 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYE github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -89,8 +107,6 @@ github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f h1:ZMEzE7R0WNq github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f h1:gJzxrodnNd/CtPXjO3WYiakyNzHg3rtAi7rO74ejHYU= github.com/centrify/cloud-golang-sdk 
v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ= @@ -99,7 +115,6 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyY github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= @@ -115,7 +130,6 @@ github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5 github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d h1:t5Wuyh53qYyg9eqn4BbnlIT+vmhyww0TatL+zT3uWgI= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -134,7 +148,6 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -190,16 +203,13 @@ github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dT github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df h1:fwXmhM0OqixzJDOGgTSyNH9eEDij9uGTXwsyWXvyR0A= github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod 
h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= -github.com/gogo/protobuf v1.0.0 h1:2jyBKDKU/8v3v2xVR2PtiWQviFUyiaGk2rpfyFT8rTM= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -211,9 +221,7 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -221,15 +229,11 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a h1:ZJu5NB1Bk5ms4vw0Xu4i+jD32SE9jQXyfnOvwhHqlT0= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -248,8 +252,9 @@ github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -267,7 +272,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.4.1 h1:pX7cnDwSSmG0dR9yNjCQSSpmsJOqFdT7SzVp5Yl9uVw= github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -291,20 +295,21 @@ github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMC github.com/hashicorp/go-gcp-common v0.5.0 h1:kkIQTjNTopn4eXQ1+lCiHYZXUtgIZvbc6YtAQkMnTos= github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.10.1 h1:uyt/l0dWjJ879yiAu+T7FG3/6QX+zwm4bQ8P7XsYt3o= github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88 h1:YMLQiDrLQT1QEollUbw+yxpW0yJiKVHDGmMpBISeACA= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191229213738-edc2c6e9ee1d h1:smWxnN+O7eRYqt0nDSwdbxacOczfnf3hMAxT9SMxeno= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191229213738-edc2c6e9ee1d/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs= github.com/hashicorp/go-memdb v1.0.2 h1:AIjzJlwIxz2inhZqRJZfe6D15lPeF0/cZyS1BVlnlHg= 
github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -314,13 +319,10 @@ github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cR github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4= github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU= -github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= -github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -330,11 +332,9 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 h1:PKbxRbsOP7R3f/TpdqcgXrO69T3yd9nLoR+RMRUxSxA= github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -342,7 +342,6 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/gokrb5 v7.3.1-0.20191209171754-1a6fa9886ec3+incompatible h1:9fIGLV0+jeF9D/oT4gZE0oh336tsKA7mxjinPOxKIjU= github.com/hashicorp/gokrb5 v7.3.1-0.20191209171754-1a6fa9886ec3+incompatible/go.mod h1:ke+MQBkyg7J7V+tM7GWSeDTlF27zlpm+u9nP3AANFEg= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= 
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -362,19 +361,17 @@ github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8A github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab h1:WzGMwlO1DvaC93SvVOBOKtn+nXGEDXapyJuaRV3/VaY= github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= -github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec h1:HXVE8h6RXFsPJgwWpE+5CscsgekqtX4nhDlZGV9jEe4= github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec/go.mod h1:TYFfVFgKF9x92T7uXouI9rLPkNnyXo/KkNcj5t+mjdM= -github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190814210035-08e00d801115 h1:E57y918o+c+NoI5k7ohbpZu7vRm1XZKZfC5VQVpJvDI= -github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190814210035-08e00d801115/go.mod h1:sRhTnkcbjJgPeES0ddCTq8S2waSakyMiWLUwO5J/Wjk= +github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20191119151105-86f21fbc96e3 h1:MPJR3FJzYAbj7hiy0WrUQ8anvZCnE7pZxDTPl4lKFdw= +github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20191119151105-86f21fbc96e3/go.mod h1:D5qI4EY3QwWXqlpnyvRVtFw5CPlJwmlxkxN3SVQeV/U= github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190814210042-090ec2ed93ce h1:X8umWdCqSVk/75ZjEBDxYL+V8i+jK3KbJbFoyOryCww= github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190814210042-090ec2ed93ce/go.mod h1:WstOCHERNbk2dblnY5MV9Qeh/hzTSQpVs5xPuyAzlBo= github.com/hashicorp/vault-plugin-auth-cf v0.0.0-20190821162840-1c2205826fee h1:gJG1PJGiqi+0M0HTYlwDyV5CyetLhFl9DxyMJre5H9Y= github.com/hashicorp/vault-plugin-auth-cf v0.0.0-20190821162840-1c2205826fee/go.mod h1:zOag32+pm1R4FFNhXMLP506Oesjoai3gHEEpxqUaTr0= -github.com/hashicorp/vault-plugin-auth-gcp v0.5.1 h1:8DR00s+Wmc21i3sfzvsqW88VMdf6NI2ue+onGoHshww= github.com/hashicorp/vault-plugin-auth-gcp v0.5.1/go.mod h1:eLj92eX8MPI4vY1jaazVLF2sVbSAJ3LRHLRhF/pUmlI= github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190930204802-acfd134850c2 h1:gtpqHauSoJCxZStLVWKMQcsdW61EewJSoegMrZLQ/GU= github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190930204802-acfd134850c2/go.mod h1:j0hMnnTD44zXGQhLM1jarYDaTmSp6OPiOzgFQ6mNgzc= @@ -392,18 +389,16 @@ github.com/hashicorp/vault-plugin-secrets-ad v0.6.2 h1:gz3TolHTqE+HitO5FXwrDhXeZ github.com/hashicorp/vault-plugin-secrets-ad v0.6.2/go.mod h1:beQFPnMIABuWeeCVPFbSAWmJgm3kcGqpswGOGOiJjYc= github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190814210129-4d18bec92f56 h1:PGE26//x1eiAbZ1ExffhKa4y9xgDKLd9BHDZRkOzbEY= github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190814210129-4d18bec92f56/go.mod h1:hJ42zFd3bHyE8O2liBUG+VPY0JxdMrj51TOwVGViUIU= -github.com/hashicorp/vault-plugin-secrets-azure v0.5.2 h1:8Jz4kl0D4+DPpP13jbIrysv1RYogUBucxC4D5xPBkiA= -github.com/hashicorp/vault-plugin-secrets-azure v0.5.2/go.mod 
h1:SBc53adxMmf+o8zqRbqYvq+nuSrz8OHYmgmPfxVMJEo= -github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191112195538-3c798536d157 h1:fXpYB9aF6Jgv0tZFjh46GqEkH7jIiGAwkD9Gkh2RuDw= -github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191112195538-3c798536d157/go.mod h1:Sc+ba3kscakE5a/pi8JJhWvXWok3cpt1P77DApmUuDc= +github.com/hashicorp/vault-plugin-secrets-azure v0.5.3-0.20191119150734-45c076c82f1d h1:Upi6xoJeeElnius+UKQhnG92Qg+OK6RU7MUtV4EnTWE= +github.com/hashicorp/vault-plugin-secrets-azure v0.5.3-0.20191119150734-45c076c82f1d/go.mod h1:EPotRM+iyjLgjheuUxdhrRNqXi4PxkDZJQDjbolNUCE= +github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191119222840-524d5b57ed36 h1:bZudT1kURwa3Den34VKSwn/RHevUeY5kF6zMKwQ19vA= +github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191119222840-524d5b57ed36/go.mod h1:2szK4N9wdRkaKFyPrde+2a5DeR6IQs1fOBPyy7XBuTY= github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190814210149-315cdbf5de6e h1:RjQBOFneGwxhHsymNtbEUJXAjMO74GlZcmUrGqJnYxY= github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190814210149-315cdbf5de6e/go.mod h1:5prAHuCcBiyv+xfGBviTVYeDQUhmQYN7WrxC2gMRWeQ= github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20191017213228-e8cf7060a4d0 h1:w4qR/yfqWOYmncR1HK1CVU7iHkqgcf0USWtbp/fTHM4= github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20191017213228-e8cf7060a4d0/go.mod h1:H0VKQagsJoK9o2qpULMgbspuWVnFe3G4S/K7f0Dr8qY= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -457,21 +452,17 @@ github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-runewidth v0.0.2/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/matttproud/golang_protobuf_extensions v1.0.0 h1:YNOwxxSJzSUARoD9KRZLzM9Y858MNGCOACTvCW9TSAc= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -496,14 +487,12 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8 h1:1CO5wil3HuiVLrUQ2ovSTO+6AfNOA5EMkHHVyHE9IwA= github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -522,13 +511,11 @@ github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -541,26 +528,23 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P github.com/openzipkin/zipkin-go v0.1.1/go.mod 
h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/oracle/oci-go-sdk v7.0.0+incompatible h1:oj5ESjXwwkFRdhZSnPlShvLWYdt/IZ65RQxveYM3maA= github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= -github.com/ory/dockertest v3.3.4+incompatible h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8= +github.com/oracle/oci-go-sdk v12.5.0+incompatible h1:pr08ECoaDKHWO9tnzJB1YqClEs7ZK1CFOez2DQocH14= +github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -573,24 +557,20 @@ github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAm github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang 
v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L7EX0km2LYM8HKpNWRiouxjE3XHkyGc= github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be h1:MoyXp/VjXUwM0GyDcdwT7Ubea2gxOSHpPaFo3qV+Y2A= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -616,9 +596,7 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+D github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -631,19 +609,16 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= 
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -671,11 +646,8 @@ go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -685,17 +657,14 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4 h1:wviDUSmtheHRBfoY8B9U8ELl2USoXi2YFwdGdpIIkzI= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod 
h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191106202628-ed6320f186d4 h1:PDpCLFAH/YIX0QpHPf2eO7L4rC2OOirBrKtXTLLiNTY= golang.org/x/crypto v0.0.0-20191106202628-ed6320f186d4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -709,7 +678,6 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -722,9 +690,8 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -736,20 +703,19 @@ golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod 
h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -761,26 +727,22 @@ golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= @@ -793,8 +755,8 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -803,37 +765,36 @@ google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+ google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.5.0 h1:lj9SyhMzyoa38fgFF0oO2T6pjs5IzkLPKfVtxpyCRMM= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw= google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
-google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80 h1:GL7nK1hkDKrkor0eVOYcMdIsUGErFnaC2gpBOVC+vbI= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c h1:m9avZ3wyOWBR0fLC+qWbMBulk+Jiiqelngssgp8jfIs= google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -883,6 +844,7 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190409092523-d687e77c8ae9 h1:c9UEl5z8gk1DGh/g3snETZ+a52YeR9VdbX/3BQ4PHas= k8s.io/api v0.0.0-20190409092523-d687e77c8ae9/go.mod h1:FQEUn50aaytlU65qqBn/w+5ugllHwrBzKm7DzbnXdzE= diff 
--git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go
index 73db322dbc..83d97aaa93 100644
--- a/physical/dynamodb/dynamodb.go
+++ b/physical/dynamodb/dynamodb.go
@@ -24,7 +24,7 @@ import (
 	"github.com/hashicorp/errwrap"
 	cleanhttp "github.com/hashicorp/go-cleanhttp"
 	uuid "github.com/hashicorp/go-uuid"
-	"github.com/hashicorp/vault/helper/awsutil"
+	"github.com/hashicorp/vault/sdk/helper/awsutil"
 	"github.com/hashicorp/vault/sdk/helper/consts"
 	"github.com/hashicorp/vault/sdk/physical"
 )
diff --git a/physical/raft/raft.go b/physical/raft/raft.go
index d1fdfcb0de..2bee74e96d 100644
--- a/physical/raft/raft.go
+++ b/physical/raft/raft.go
@@ -16,6 +16,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/hashicorp/errwrap"
 	log "github.com/hashicorp/go-hclog"
+	wrapping "github.com/hashicorp/go-kms-wrapping"
 	"github.com/hashicorp/go-raftchunking"
 	"github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/raft"
@@ -658,7 +659,7 @@ func (b *RaftBackend) Peers(ctx context.Context) ([]Peer, error) {
 // Snapshot takes a raft snapshot, packages it into a archive file and writes it
 // to the provided writer. Seal access is used to encrypt the SHASUM file so we
 // can validate the snapshot was taken using the same master keys or not.
-func (b *RaftBackend) Snapshot(out *logical.HTTPResponseWriter, access seal.Access) error {
+func (b *RaftBackend) Snapshot(out *logical.HTTPResponseWriter, access *seal.Access) error {
 	b.l.RLock()
 	defer b.l.RUnlock()

@@ -701,7 +702,7 @@
 // access is used to decrypt the SHASUM file in the archive to ensure this
 // snapshot has the same master key as the running instance. If the provided
 // access is nil then it will skip that validation.
-func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access seal.Access) (*os.File, func(), raft.SnapshotMeta, error) {
+func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access *seal.Access) (*os.File, func(), raft.SnapshotMeta, error) {
 	b.l.RLock()
 	defer b.l.RUnlock()

@@ -1036,7 +1037,7 @@ func (l *RaftLock) Value() (bool, string, error) {
 // sealer implements the snapshot.Sealer interface and is used in the snapshot
 // process for encrypting/decrypting the SHASUM file in snapshot archives.
 type sealer struct {
-	access seal.Access
+	access *seal.Access
 }

 // Seal encrypts the data with using the seal access object.
@@ -1044,7 +1045,7 @@ func (s sealer) Seal(ctx context.Context, pt []byte) ([]byte, error) {
 	if s.access == nil {
 		return nil, errors.New("no seal access available")
 	}
-	eblob, err := s.access.Encrypt(ctx, pt)
+	eblob, err := s.access.Encrypt(ctx, pt, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1058,11 +1059,11 @@ func (s sealer) Open(ctx context.Context, ct []byte) ([]byte, error) {
 		return nil, errors.New("no seal access available")
 	}

-	var eblob physical.EncryptedBlobInfo
+	var eblob wrapping.EncryptedBlobInfo
 	err := proto.Unmarshal(ct, &eblob)
 	if err != nil {
 		return nil, err
 	}

-	return s.access.Decrypt(ctx, &eblob)
+	return s.access.Decrypt(ctx, &eblob, nil)
 }
diff --git a/physical/s3/s3.go b/physical/s3/s3.go
index c7a6058dbb..de036a854f 100644
--- a/physical/s3/s3.go
+++ b/physical/s3/s3.go
@@ -21,7 +21,7 @@ import (
 	"github.com/hashicorp/errwrap"
 	"github.com/hashicorp/go-cleanhttp"
 	log "github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/vault/helper/awsutil"
+	"github.com/hashicorp/vault/sdk/helper/awsutil"
 	"github.com/hashicorp/vault/sdk/helper/consts"
 	"github.com/hashicorp/vault/sdk/helper/parseutil"
 	"github.com/hashicorp/vault/sdk/physical"
 )
diff --git a/physical/s3/s3_test.go b/physical/s3/s3_test.go
index f29a6ea289..becb89b06b 100644
--- a/physical/s3/s3_test.go
+++ b/physical/s3/s3_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	log "github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/vault/helper/awsutil"
+	"github.com/hashicorp/vault/sdk/helper/awsutil"
 	"github.com/hashicorp/vault/sdk/helper/logging"
 	"github.com/hashicorp/vault/sdk/physical"
 )
diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go
index a92025c7d9..8a9e313b85 100644
--- a/sdk/framework/backend.go
+++ b/sdk/framework/backend.go
@@ -15,9 +15,9 @@ import (
 	"github.com/hashicorp/errwrap"
 	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-kms-wrapping/entropy"
 	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/vault/sdk/helper/consts"
-	"github.com/hashicorp/vault/sdk/helper/entropy"
 	"github.com/hashicorp/vault/sdk/helper/errutil"
 	"github.com/hashicorp/vault/sdk/helper/license"
 	"github.com/hashicorp/vault/sdk/helper/logging"
diff --git a/sdk/go.mod b/sdk/go.mod
index 5589866850..b7a961a57d 100644
--- a/sdk/go.mod
+++ b/sdk/go.mod
@@ -5,14 +5,16 @@ go 1.13
 require (
 	github.com/armon/go-metrics v0.3.0
 	github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310
+	github.com/aws/aws-sdk-go v1.25.37
 	github.com/fatih/structs v1.1.0
 	github.com/go-ldap/ldap/v3 v3.1.3
 	github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31
-	github.com/golang/protobuf v1.3.1
+	github.com/golang/protobuf v1.3.2
 	github.com/golang/snappy v0.0.1
 	github.com/hashicorp/errwrap v1.0.0
 	github.com/hashicorp/go-hclog v0.10.1
 	github.com/hashicorp/go-immutable-radix v1.0.0
+	github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88
 	github.com/hashicorp/go-multierror v1.0.0
 	github.com/hashicorp/go-plugin v1.0.1
 	github.com/hashicorp/go-sockaddr v1.0.2
@@ -20,16 +22,12 @@ require (
 	github.com/hashicorp/go-version v1.1.0
 	github.com/hashicorp/golang-lru v0.5.1
 	github.com/hashicorp/hcl v1.0.0
-	github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
 	github.com/mitchellh/copystructure v1.0.0
 	github.com/mitchellh/go-testing-interface v1.0.0
 	github.com/mitchellh/mapstructure v1.1.2
 	github.com/pierrec/lz4 v2.0.5+incompatible
 	github.com/ryanuber/go-glob v1.0.0
-	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
-	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 // indirect
+	golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480
 	golang.org/x/sys v0.0.0-20191008105621-543471e840be
-	golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db // indirect
-	google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 // indirect
 	google.golang.org/grpc v1.22.0
 )
diff --git a/sdk/go.sum b/sdk/go.sum
index 6e9f6e8b17..4b6f495d48 100644
--- a/sdk/go.sum
+++ b/sdk/go.sum
@@ -1,12 +1,36 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
+github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aws/aws-sdk-go v1.25.37 h1:gBtB/F3dophWpsUQKN/Kni+JzYEH2mGHF4hWNtfED1w=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= @@ -14,6 +38,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= @@ -24,32 +50,50 @@ github.com/go-ldap/ldap/v3 v3.1.3 h1:RIgdpHXJpsUqUK5WXwKyVsESrGFqo5BRWPk3RR4/ogQ github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.10.1 h1:uyt/l0dWjJ879yiAu+T7FG3/6QX+zwm4bQ8P7XsYt3o= github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88 h1:YMLQiDrLQT1QEollUbw+yxpW0yJiKVHDGmMpBISeACA= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= @@ -59,25 +103,31 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb 
h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -86,8 +136,11 @@ github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQz github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= 
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= @@ -95,72 +148,98 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod 
h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc h1:MeuS1UDyZyFH++6vVy44PuufTeFF0d0nfI6XB87YGSk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc 
h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto 
v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/helper/awsutil/error.go b/sdk/helper/awsutil/error.go similarity index 78% rename from helper/awsutil/error.go rename to sdk/helper/awsutil/error.go index 708382766f..248ab82e3d 100644 --- a/helper/awsutil/error.go +++ b/sdk/helper/awsutil/error.go @@ -1,11 +1,14 @@ package awsutil import ( + "errors" + awsRequest "github.com/aws/aws-sdk-go/aws/request" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/sdk/logical" ) +var ErrUpstreamRateLimited = errors.New("upstream rate limited") + // CheckAWSError will examine an error and convert to a logical error if // appropriate. If no appropriate error is found, return nil func CheckAWSError(err error) error { @@ -13,17 +16,17 @@ func CheckAWSError(err error) error { // known request limiting errors: // https://github.com/aws/aws-sdk-go/blob/488d634b5a699b9118ac2befb5135922b4a77210/aws/request/retryer.go#L35 if awsRequest.IsErrorThrottle(err) { - return logical.ErrUpstreamRateLimited + return ErrUpstreamRateLimited } return nil } -// AppendLogicalError checks if the given error is a known AWS error we modify, +// AppendAWSError checks if the given error is a known AWS error we modify, // and if so then returns a go-multierror, appending the original and the -// logical error. +// AWS error. // If the error is not an AWS error, or not an error we wish to modify, then // return the original error. 
-func AppendLogicalError(err error) error { +func AppendAWSError(err error) error { if awserr := CheckAWSError(err); awserr != nil { err = multierror.Append(err, awserr) } diff --git a/helper/awsutil/error_test.go b/sdk/helper/awsutil/error_test.go similarity index 86% rename from helper/awsutil/error_test.go rename to sdk/helper/awsutil/error_test.go index c6ea8456d9..9a4851ea4d 100644 --- a/helper/awsutil/error_test.go +++ b/sdk/helper/awsutil/error_test.go @@ -6,7 +6,6 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/sdk/logical" ) func Test_CheckAWSError(t *testing.T) { @@ -22,12 +21,12 @@ func Test_CheckAWSError(t *testing.T) { { Name: "Upstream throttle error", Err: awserr.New("Throttling", "", nil), - Expected: logical.ErrUpstreamRateLimited, + Expected: ErrUpstreamRateLimited, }, { Name: "Upstream RequestLimitExceeded", Err: awserr.New("RequestLimitExceeded", "Request rate limited", nil), - Expected: logical.ErrUpstreamRateLimited, + Expected: ErrUpstreamRateLimited, }, } @@ -47,7 +46,7 @@ func Test_CheckAWSError(t *testing.T) { } } -func Test_AppendLogicalError(t *testing.T) { +func Test_AppendRateLimitedError(t *testing.T) { awsErr := awserr.New("Throttling", "", nil) testCases := []struct { Name string @@ -62,7 +61,7 @@ func Test_AppendLogicalError(t *testing.T) { { Name: "Upstream throttle error", Err: awsErr, - Expected: multierror.Append(awsErr, logical.ErrUpstreamRateLimited), + Expected: multierror.Append(awsErr, ErrUpstreamRateLimited), }, { Name: "Nil", @@ -71,7 +70,7 @@ func Test_AppendLogicalError(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - err := AppendLogicalError(tc.Err) + err := AppendAWSError(tc.Err) if err == nil && tc.Expected != nil { t.Fatalf("expected non-nil error (%#v), got nil", tc.Expected) } diff --git a/helper/awsutil/generate_credentials.go b/sdk/helper/awsutil/generate_credentials.go similarity index 100% rename from helper/awsutil/generate_credentials.go rename to sdk/helper/awsutil/generate_credentials.go diff --git a/sdk/helper/awsutil/region.go b/sdk/helper/awsutil/region.go new file mode 100644 index 0000000000..7ab0c21e1c --- /dev/null +++ b/sdk/helper/awsutil/region.go @@ -0,0 +1,73 @@ +package awsutil + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/hashicorp/errwrap" +) + +// "us-east-1 is used because it's where AWS first provides support for new features, +// is a widely used region, and is the most common one for some services like STS. +const DefaultRegion = "us-east-1" + +var ec2MetadataBaseURL = "http://169.254.169.254" + +/* +It's impossible to mimic "normal" AWS behavior here because it's not consistent +or well-defined. For example, boto3, the Python SDK (which the aws cli uses), +loads `~/.aws/config` by default and only reads the `AWS_DEFAULT_REGION` environment +variable (and not `AWS_REGION`, while the golang SDK does _mostly_ the opposite -- it +reads the region **only** from `AWS_REGION` and not at all `~/.aws/config`, **unless** +the `AWS_SDK_LOAD_CONFIG` environment variable is set. So, we must define our own +approach to walking AWS config and deciding what to use. + +Our chosen approach is: + + "More specific takes precedence over less specific." + +1. User-provided configuration is the most explicit. +2. 
Environment variables are potentially shared across many invocations and so they have less precedence. +3. Configuration in `~/.aws/config` is shared across all invocations of a given user and so this has even less precedence. +4. Configuration retrieved from the EC2 instance metadata service is shared by all invocations on a given machine, and so it has the lowest precedence. + +This approach should be used in future updates to this logic. +*/ +func GetRegion(configuredRegion string) (string, error) { + if configuredRegion != "" { + return configuredRegion, nil + } + + sess, err := session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + }) + if err != nil { + return "", errwrap.Wrapf("got error when starting session: {{err}}", err) + } + + region := aws.StringValue(sess.Config.Region) + if region != "" { + return region, nil + } + + metadata := ec2metadata.New(sess, &aws.Config{ + Endpoint: aws.String(ec2MetadataBaseURL + "/latest"), + EC2MetadataDisableTimeoutOverride: aws.Bool(true), + HTTPClient: &http.Client{ + Timeout: time.Second, + }, + }) + if !metadata.Available() { + return DefaultRegion, nil + } + + region, err = metadata.Region() + if err != nil { + return "", errwrap.Wrapf("unable to retrieve region from instance metadata: {{err}}", err) + } + + return region, nil +} diff --git a/helper/awsutil/region_test.go b/sdk/helper/awsutil/region_test.go similarity index 86% rename from helper/awsutil/region_test.go rename to sdk/helper/awsutil/region_test.go index 25a6c284ca..9928d3982a 100644 --- a/helper/awsutil/region_test.go +++ b/sdk/helper/awsutil/region_test.go @@ -10,7 +10,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - hclog "github.com/hashicorp/go-hclog" ) const testConfigFile = `[default] @@ -20,13 +19,12 @@ output=json` var ( shouldTestFiles = os.Getenv("VAULT_ACC_AWS_FILES") == "1" - logger = hclog.NewNullLogger() expectedTestRegion = "us-west-2" unexpectedTestRegion = "us-east-2" regionEnvKeys = []string{"AWS_REGION", "AWS_DEFAULT_REGION"} ) -func TestGetOrDefaultRegion_UserConfigPreferredFirst(t *testing.T) { +func TestGetRegion_UserConfigPreferredFirst(t *testing.T) { configuredRegion := expectedTestRegion cleanupEnv := setEnvRegion(t, unexpectedTestRegion) @@ -38,13 +36,16 @@ func TestGetOrDefaultRegion_UserConfigPreferredFirst(t *testing.T) { cleanupMetadata := setInstanceMetadata(t, unexpectedTestRegion) defer cleanupMetadata() - result := GetOrDefaultRegion(logger, configuredRegion) + result, err := GetRegion(configuredRegion) + if err != nil { + t.Fatal(err) + } if result != expectedTestRegion { t.Fatalf("expected: %s; actual: %s", expectedTestRegion, result) } } -func TestGetOrDefaultRegion_EnvVarsPreferredSecond(t *testing.T) { +func TestGetRegion_EnvVarsPreferredSecond(t *testing.T) { configuredRegion := "" cleanupEnv := setEnvRegion(t, expectedTestRegion) @@ -56,13 +57,16 @@ func TestGetOrDefaultRegion_EnvVarsPreferredSecond(t *testing.T) { cleanupMetadata := setInstanceMetadata(t, unexpectedTestRegion) defer cleanupMetadata() - result := GetOrDefaultRegion(logger, configuredRegion) + result, err := GetRegion(configuredRegion) + if err != nil { + t.Fatal(err) + } if result != expectedTestRegion { t.Fatalf("expected: %s; actual: %s", expectedTestRegion, result) } } -func TestGetOrDefaultRegion_ConfigFilesPreferredThird(t *testing.T) { +func TestGetRegion_ConfigFilesPreferredThird(t *testing.T) { if !shouldTestFiles { // In some test environments, like a CI environment, we may not have the // permissions to 
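A short usage sketch of the relocated helper, reflecting the new signature shown above (GetRegion now returns an error instead of logging and falling back silently); the surrounding main function and messages are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/sdk/helper/awsutil"
)

func main() {
	// Pass the operator-configured region if there is one; an empty string
	// walks the precedence chain documented above (env vars, shared config,
	// EC2 instance metadata) and finally falls back to awsutil.DefaultRegion.
	region, err := awsutil.GetRegion("")
	if err != nil {
		log.Fatalf("unable to determine AWS region: %v", err)
	}
	fmt.Println("resolved region:", region)
}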
write to the ~/.aws/config file. Thus, this test is off @@ -80,13 +84,16 @@ func TestGetOrDefaultRegion_ConfigFilesPreferredThird(t *testing.T) { cleanupMetadata := setInstanceMetadata(t, unexpectedTestRegion) defer cleanupMetadata() - result := GetOrDefaultRegion(logger, configuredRegion) + result, err := GetRegion(configuredRegion) + if err != nil { + t.Fatal(err) + } if result != expectedTestRegion { t.Fatalf("expected: %s; actual: %s", expectedTestRegion, result) } } -func TestGetOrDefaultRegion_ConfigFileUnfound(t *testing.T) { +func TestGetRegion_ConfigFileUnfound(t *testing.T) { if enabled := os.Getenv("VAULT_ACC"); enabled == "" { t.Skip() } @@ -104,13 +111,16 @@ func TestGetOrDefaultRegion_ConfigFileUnfound(t *testing.T) { } }() - result := GetOrDefaultRegion(logger, configuredRegion) + result, err := GetRegion(configuredRegion) + if err != nil { + t.Fatal(err) + } if result != DefaultRegion { t.Fatalf("expected: %s; actual: %s", DefaultRegion, result) } } -func TestGetOrDefaultRegion_EC2InstanceMetadataPreferredFourth(t *testing.T) { +func TestGetRegion_EC2InstanceMetadataPreferredFourth(t *testing.T) { if !shouldTestFiles { // In some test environments, like a CI environment, we may not have the // permissions to write to the ~/.aws/config file. Thus, this test is off @@ -128,13 +138,16 @@ func TestGetOrDefaultRegion_EC2InstanceMetadataPreferredFourth(t *testing.T) { cleanupMetadata := setInstanceMetadata(t, expectedTestRegion) defer cleanupMetadata() - result := GetOrDefaultRegion(logger, configuredRegion) + result, err := GetRegion(configuredRegion) + if err != nil { + t.Fatal(err) + } if result != expectedTestRegion { t.Fatalf("expected: %s; actual: %s", expectedTestRegion, result) } } -func TestGetOrDefaultRegion_DefaultsToDefaultRegionWhenRegionUnavailable(t *testing.T) { +func TestGetRegion_DefaultsToDefaultRegionWhenRegionUnavailable(t *testing.T) { if enabled := os.Getenv("VAULT_ACC"); enabled == "" { t.Skip() } @@ -147,7 +160,10 @@ func TestGetOrDefaultRegion_DefaultsToDefaultRegionWhenRegionUnavailable(t *test cleanupFile := setConfigFileRegion(t, "") defer cleanupFile() - result := GetOrDefaultRegion(logger, configuredRegion) + result, err := GetRegion(configuredRegion) + if err != nil { + t.Fatal(err) + } if result != DefaultRegion { t.Fatalf("expected: %s; actual: %s", DefaultRegion, result) } diff --git a/sdk/helper/entropy/entropy.go b/sdk/helper/entropy/entropy.go deleted file mode 100644 index afaef76edb..0000000000 --- a/sdk/helper/entropy/entropy.go +++ /dev/null @@ -1,37 +0,0 @@ -package entropy - -import ( - "fmt" - - "github.com/hashicorp/errwrap" -) - -type Sourcer interface { - GetRandom(bytes int) ([]byte, error) -} - -type Reader struct { - source Sourcer -} - -func NewReader(source Sourcer) *Reader { - return &Reader{source} -} - -// Read reads exactly len(p) bytes from r into p. -// If r returns an error having read at least len(p) bytes, the error is dropped. -// It returns the number of bytes copied and an error if fewer bytes were read. -// On return, n == len(p) if and only if err == nil. 
-func (r *Reader) Read(p []byte) (n int, err error) { - requested := len(p) - randBytes, err := r.source.GetRandom(requested) - delivered := copy(p, randBytes) - if delivered != requested { - if err != nil { - return delivered, errwrap.Wrapf("unable to fill provided buffer with entropy: {{err}}", err) - } - return delivered, fmt.Errorf("unable to fill provided buffer with entropy") - } - - return delivered, nil -} diff --git a/sdk/helper/entropy/entropy_test.go b/sdk/helper/entropy/entropy_test.go deleted file mode 100644 index 3bfb203f0f..0000000000 --- a/sdk/helper/entropy/entropy_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package entropy - -import ( - "bytes" - "errors" - "fmt" - "testing" - - "github.com/hashicorp/errwrap" -) - -type mockSourcer struct{} -type mockSourcerWithError struct{} -type mockSourcerFailureWithError struct{} -type mockSourcerFailureWithoutError struct{} - -// simulates a successful sourcer -func (m *mockSourcer) GetRandom(bytes int) ([]byte, error) { - return make([]byte, bytes), nil -} - -// simulates a sourcer that reads in the requested number of bytes but encounters an error. -// Read should drop any error if the number of bytes specified were successfully read. -func (m *mockSourcerWithError) GetRandom(bytes int) ([]byte, error) { - return make([]byte, bytes), errors.New("boom but you shouldn't care") -} - -func (m *mockSourcerFailureWithError) GetRandom(bytes int) ([]byte, error) { - numRetBytes := bytes - 1 - return make([]byte, numRetBytes), fmt.Errorf("requested %d bytes of entropy but only filled %d", bytes, numRetBytes) -} - -func (m *mockSourcerFailureWithoutError) GetRandom(bytes int) ([]byte, error) { - numRetBytes := bytes - 1 - return make([]byte, numRetBytes), nil -} - -func TestRead(t *testing.T) { - var tests = []struct { - sourcer Sourcer - preReadBuff []byte - postReadBuff []byte - outErr error - }{ - { - new(mockSourcer), - []byte{1, 2, 3, 4}, - []byte{0, 0, 0, 0}, - nil, - }, - { - new(mockSourcerWithError), - []byte{1, 2, 3, 4}, - []byte{0, 0, 0, 0}, - nil, - }, - { - new(mockSourcerFailureWithError), - []byte{1, 2, 3, 4}, - nil, - errwrap.Wrapf("unable to fill provided buffer with entropy: {{err}}", fmt.Errorf("requested %d bytes of entropy but only filled %d", 4, 3)), - }, - { - new(mockSourcerFailureWithoutError), - []byte{1, 2, 3, 4}, - nil, - fmt.Errorf("unable to fill provided buffer with entropy"), - }, - } - - for _, test := range tests { - mockReader := NewReader(test.sourcer) - buff := make([]byte, len(test.preReadBuff)) - copy(buff, test.preReadBuff) - _, err := mockReader.Read(buff) - // validate the error, both should be nil or have the same Error() - switch { - case err != nil && test.outErr != nil: - if err.Error() != test.outErr.Error() { - t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr) - } - case err != test.outErr: - t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr) - case err == nil && !bytes.Equal(buff, test.postReadBuff): - t.Fatalf("after read expected buff to be: %#v but got: %#v", test.postReadBuff, buff) - } - } -} diff --git a/sdk/physical/entry.go b/sdk/physical/entry.go index a662dfdd33..c02744a3e1 100644 --- a/sdk/physical/entry.go +++ b/sdk/physical/entry.go @@ -1,5 +1,7 @@ package physical +import wrapping "github.com/hashicorp/go-kms-wrapping" + // Entry is used to represent data stored by the physical backend type Entry struct { Key string @@ -13,5 +15,5 @@ type Entry struct { // is used to carry information about whether seal wrapping is *desired* // regardless of whether 
it's currently available. The struct below stores // needed information when it's actually performed. - SealWrapInfo *EncryptedBlobInfo `json:"seal_wrap_info,omitempty"` + SealWrapInfo *wrapping.EncryptedBlobInfo `json:"seal_wrap_info,omitempty"` } diff --git a/sdk/physical/types.pb.go b/sdk/physical/types.pb.go deleted file mode 100644 index fc9e04a430..0000000000 --- a/sdk/physical/types.pb.go +++ /dev/null @@ -1,221 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: sdk/physical/types.proto - -package physical - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type EncryptedBlobInfo struct { - Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` - IV []byte `protobuf:"bytes,2,opt,name=iv,proto3" json:"iv,omitempty"` - HMAC []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"` - Wrapped bool `protobuf:"varint,4,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - KeyInfo *SealKeyInfo `protobuf:"bytes,5,opt,name=key_info,json=keyInfo,proto3" json:"key_info,omitempty"` - // Key is the Key value for the entry that corresponds to - // physical.Entry.Key's value - Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EncryptedBlobInfo) Reset() { *m = EncryptedBlobInfo{} } -func (m *EncryptedBlobInfo) String() string { return proto.CompactTextString(m) } -func (*EncryptedBlobInfo) ProtoMessage() {} -func (*EncryptedBlobInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_21dce1f497d1541e, []int{0} -} - -func (m *EncryptedBlobInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EncryptedBlobInfo.Unmarshal(m, b) -} -func (m *EncryptedBlobInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EncryptedBlobInfo.Marshal(b, m, deterministic) -} -func (m *EncryptedBlobInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_EncryptedBlobInfo.Merge(m, src) -} -func (m *EncryptedBlobInfo) XXX_Size() int { - return xxx_messageInfo_EncryptedBlobInfo.Size(m) -} -func (m *EncryptedBlobInfo) XXX_DiscardUnknown() { - xxx_messageInfo_EncryptedBlobInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_EncryptedBlobInfo proto.InternalMessageInfo - -func (m *EncryptedBlobInfo) GetCiphertext() []byte { - if m != nil { - return m.Ciphertext - } - return nil -} - -func (m *EncryptedBlobInfo) GetIV() []byte { - if m != nil { - return m.IV - } - return nil -} - -func (m *EncryptedBlobInfo) GetHMAC() []byte { - if m != nil { - return m.HMAC - } - return nil -} - -func (m *EncryptedBlobInfo) GetWrapped() bool { - if m != nil { - return m.Wrapped - } - return false -} - -func (m *EncryptedBlobInfo) GetKeyInfo() *SealKeyInfo { - if m != nil { - return m.KeyInfo - } - return nil -} - -func (m *EncryptedBlobInfo) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -// SealKeyInfo contains information regarding the seal used to encrypt the entry. 
-type SealKeyInfo struct { - // Mechanism is the method used by the seal to encrypt and sign the - // data as defined by the seal. - Mechanism uint64 `protobuf:"varint,1,opt,name=Mechanism,proto3" json:"Mechanism,omitempty"` - HMACMechanism uint64 `protobuf:"varint,2,opt,name=HMACMechanism,proto3" json:"HMACMechanism,omitempty"` - // This is an opaque ID used by the seal to identify the specific - // key to use as defined by the seal. This could be a version, key - // label, or something else. - KeyID string `protobuf:"bytes,3,opt,name=KeyID,proto3" json:"KeyID,omitempty"` - HMACKeyID string `protobuf:"bytes,4,opt,name=HMACKeyID,proto3" json:"HMACKeyID,omitempty"` - // These value are used when generating our own data encryption keys - // and encrypting them using the autoseal - WrappedKey []byte `protobuf:"bytes,5,opt,name=WrappedKey,proto3" json:"WrappedKey,omitempty"` - // Mechanism specific flags - Flags uint64 `protobuf:"varint,6,opt,name=Flags,proto3" json:"Flags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SealKeyInfo) Reset() { *m = SealKeyInfo{} } -func (m *SealKeyInfo) String() string { return proto.CompactTextString(m) } -func (*SealKeyInfo) ProtoMessage() {} -func (*SealKeyInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_21dce1f497d1541e, []int{1} -} - -func (m *SealKeyInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SealKeyInfo.Unmarshal(m, b) -} -func (m *SealKeyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SealKeyInfo.Marshal(b, m, deterministic) -} -func (m *SealKeyInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SealKeyInfo.Merge(m, src) -} -func (m *SealKeyInfo) XXX_Size() int { - return xxx_messageInfo_SealKeyInfo.Size(m) -} -func (m *SealKeyInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SealKeyInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SealKeyInfo proto.InternalMessageInfo - -func (m *SealKeyInfo) GetMechanism() uint64 { - if m != nil { - return m.Mechanism - } - return 0 -} - -func (m *SealKeyInfo) GetHMACMechanism() uint64 { - if m != nil { - return m.HMACMechanism - } - return 0 -} - -func (m *SealKeyInfo) GetKeyID() string { - if m != nil { - return m.KeyID - } - return "" -} - -func (m *SealKeyInfo) GetHMACKeyID() string { - if m != nil { - return m.HMACKeyID - } - return "" -} - -func (m *SealKeyInfo) GetWrappedKey() []byte { - if m != nil { - return m.WrappedKey - } - return nil -} - -func (m *SealKeyInfo) GetFlags() uint64 { - if m != nil { - return m.Flags - } - return 0 -} - -func init() { - proto.RegisterType((*EncryptedBlobInfo)(nil), "physical.EncryptedBlobInfo") - proto.RegisterType((*SealKeyInfo)(nil), "physical.SealKeyInfo") -} - -func init() { proto.RegisterFile("sdk/physical/types.proto", fileDescriptor_21dce1f497d1541e) } - -var fileDescriptor_21dce1f497d1541e = []byte{ - // 316 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x5d, 0x4b, 0xfb, 0x30, - 0x14, 0xc6, 0x69, 0xd7, 0xbd, 0x9d, 0xed, 0xff, 0x47, 0x83, 0x42, 0x2e, 0x44, 0xca, 0x10, 0xac, - 0x37, 0xad, 0xe8, 0x27, 0x70, 0xbe, 0xa0, 0x8c, 0xdd, 0xc4, 0x0b, 0xc1, 0x1b, 0xc9, 0xb2, 0x6c, - 0x09, 0xed, 0x9a, 0xd0, 0x66, 0xd3, 0x7c, 0x30, 0xaf, 0xfc, 0x72, 0x92, 0x94, 0xb1, 0x7a, 0x77, - 0xce, 0x2f, 0x0f, 0x0f, 0xcf, 0x93, 0x03, 0xb8, 0x5e, 0xe6, 0x99, 0x16, 0xb6, 0x96, 0x8c, 0x16, - 0x99, 0xb1, 0x9a, 0xd7, 0xa9, 0xae, 0x94, 0x51, 0x68, 0xb0, 0xa7, 0x93, 0xef, 
0x00, 0x8e, 0x1f, - 0x4b, 0x56, 0x59, 0x6d, 0xf8, 0x72, 0x5a, 0xa8, 0xc5, 0x4b, 0xb9, 0x52, 0xe8, 0x1c, 0x80, 0x49, - 0x2d, 0x78, 0x65, 0xf8, 0x97, 0xc1, 0x41, 0x1c, 0x24, 0x63, 0xd2, 0x22, 0xe8, 0x3f, 0x84, 0x72, - 0x87, 0x43, 0xcf, 0x43, 0xb9, 0x43, 0x08, 0x22, 0xb1, 0xa1, 0x0c, 0x77, 0x3c, 0xf1, 0x33, 0xc2, - 0xd0, 0xff, 0xac, 0xa8, 0xd6, 0x7c, 0x89, 0xa3, 0x38, 0x48, 0x06, 0x64, 0xbf, 0xa2, 0x6b, 0x18, - 0xe4, 0xdc, 0x7e, 0xc8, 0x72, 0xa5, 0x70, 0x37, 0x0e, 0x92, 0xd1, 0xcd, 0x69, 0xba, 0x0f, 0x94, - 0xbe, 0x72, 0x5a, 0xcc, 0xb8, 0x75, 0x31, 0x48, 0x3f, 0x6f, 0x06, 0x74, 0x04, 0x9d, 0x9c, 0x5b, - 0xdc, 0x8b, 0x83, 0x64, 0x48, 0xdc, 0x38, 0xf9, 0x09, 0x60, 0xd4, 0x92, 0xa2, 0x33, 0x18, 0xce, - 0x39, 0x13, 0xb4, 0x94, 0xf5, 0xc6, 0x07, 0x8e, 0xc8, 0x01, 0xa0, 0x0b, 0xf8, 0xf7, 0x3c, 0xbf, - 0xbb, 0x3f, 0x28, 0x42, 0xaf, 0xf8, 0x0b, 0xd1, 0x09, 0x74, 0x9d, 0xdd, 0x83, 0xaf, 0x31, 0x24, - 0xcd, 0xe2, 0x9c, 0x9d, 0xac, 0x79, 0x89, 0xfc, 0xcb, 0x01, 0xb8, 0x9f, 0x7a, 0x6b, 0x6a, 0xcd, - 0xb8, 0xf5, 0x6d, 0xc6, 0xa4, 0x45, 0x9c, 0xe7, 0x53, 0x41, 0xd7, 0xb5, 0xcf, 0x1e, 0x91, 0x66, - 0x99, 0x5e, 0xbd, 0x5f, 0xae, 0xa5, 0x11, 0xdb, 0x45, 0xca, 0xd4, 0x26, 0x13, 0xb4, 0x16, 0x92, - 0xa9, 0x4a, 0x67, 0x3b, 0xba, 0x2d, 0x4c, 0xd6, 0x3e, 0xdb, 0xa2, 0xe7, 0x2f, 0x76, 0xfb, 0x1b, - 0x00, 0x00, 0xff, 0xff, 0xc6, 0x34, 0xf8, 0x45, 0xcd, 0x01, 0x00, 0x00, -} diff --git a/sdk/physical/types.proto b/sdk/physical/types.proto deleted file mode 100644 index 1241382d3b..0000000000 --- a/sdk/physical/types.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/hashicorp/vault/sdk/physical"; - -package physical; - -message EncryptedBlobInfo { - bytes ciphertext = 1; - bytes iv = 2; - bytes hmac = 3; - bool wrapped = 4; - SealKeyInfo key_info = 5; - - // Key is the Key value for the entry that corresponds to - // physical.Entry.Key's value - string key = 6; -} - -// SealKeyInfo contains information regarding the seal used to encrypt the entry. -message SealKeyInfo { - // Mechanism is the method used by the seal to encrypt and sign the - // data as defined by the seal. - uint64 Mechanism = 1; - uint64 HMACMechanism = 2; - - // This is an opaque ID used by the seal to identify the specific - // key to use as defined by the seal. This could be a version, key - // label, or something else. 
- string KeyID = 3; - string HMACKeyID = 4; - - // These value are used when generating our own data encryption keys - // and encrypting them using the autoseal - bytes WrappedKey = 5; - - // Mechanism specific flags - uint64 Flags = 6; -} diff --git a/vault/core.go b/vault/core.go index 2612962704..fb3e343fff 100644 --- a/vault/core.go +++ b/vault/core.go @@ -21,6 +21,8 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" multierror "github.com/hashicorp/go-multierror" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" @@ -42,8 +44,7 @@ import ( sr "github.com/hashicorp/vault/serviceregistration" "github.com/hashicorp/vault/shamir" "github.com/hashicorp/vault/vault/cluster" - "github.com/hashicorp/vault/vault/seal" - shamirseal "github.com/hashicorp/vault/vault/seal/shamir" + vaultseal "github.com/hashicorp/vault/vault/seal" cache "github.com/patrickmn/go-cache" "google.golang.org/grpc" ) @@ -157,7 +158,7 @@ type unlockInformation struct { } type raftInformation struct { - challenge *physical.EncryptedBlobInfo + challenge *wrapping.EncryptedBlobInfo leaderClient *api.Client leaderBarrierConfig *SealConfig nonVoter bool @@ -770,7 +771,11 @@ func NewCore(conf *CoreConfig) (*Core, error) { } if c.seal == nil { - c.seal = NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("shamir"))) + c.seal = NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("shamir"), + }), + }) } c.seal.SetCore(c) @@ -990,17 +995,17 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) { } if masterKey != nil { - if c.seal.BarrierType() == seal.Shamir { + if c.seal.BarrierType() == wrapping.Shamir { // If this is a legacy shamir seal this serves no purpose but it // doesn't hurt. - err = c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(masterKey) + err = c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(masterKey) if err != nil { return false, err } } if !c.isRaftUnseal() { - if c.seal.BarrierType() == seal.Shamir { + if c.seal.BarrierType() == wrapping.Shamir { cfg, err := c.seal.BarrierConfig(ctx) if err != nil { return false, err @@ -1037,7 +1042,7 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) { go func() { keyringFound := false - haveMasterKey := c.seal.StoredKeysSupported() != StoredKeysSupportedShamirMaster + haveMasterKey := c.seal.StoredKeysSupported() != vaultseal.StoredKeysSupportedShamirMaster defer func() { if keyringFound && haveMasterKey { _, err := c.unsealInternal(ctx, masterKey) @@ -1171,7 +1176,7 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover // keys setup, nor 2) seals that support recovery keys but not stored keys. // If insufficient shares are provided, shamir.Combine will error, and if // no stored keys are found it will return masterKey as nil. - if seal.StoredKeysSupported() == StoredKeysSupportedGeneric { + if seal.StoredKeysSupported() == vaultseal.StoredKeysSupportedGeneric { masterKeyShares, err := seal.GetStoredKeys(ctx) if err != nil { return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err) @@ -1196,8 +1201,8 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover // If we have a migration seal, now's the time! 
if c.migrationSeal != nil { - if seal.StoredKeysSupported() == StoredKeysSupportedShamirMaster { - err = seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(masterKey) + if seal.StoredKeysSupported() == vaultseal.StoredKeysSupportedShamirMaster { + err = seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(masterKey) if err != nil { return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err) } @@ -1245,7 +1250,7 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover // We have recovery keys; we're going to use them as the new // shamir KeK. - err = c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(recoveryKey) + err = c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(recoveryKey) if err != nil { return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err) } @@ -2048,7 +2053,7 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi // In older versions of vault the default seal would not store a type. This // is here to offer backwards compatibility for older seal configs. if barrierConf.Type == "" { - barrierConf.Type = seal.Shamir + barrierConf.Type = wrapping.Shamir } var recoveryConf *SealConfig @@ -2068,7 +2073,7 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi // In older versions of vault the default seal would not store a type. This // is here to offer backwards compatibility for older seal configs. if recoveryConf.Type == "" { - recoveryConf.Type = seal.Shamir + recoveryConf.Type = wrapping.Shamir } } @@ -2098,7 +2103,7 @@ func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) { // that can be used to unseal the barrier. func (c *Core) unsealKeyToMasterKey(ctx context.Context, combinedKey []byte) ([]byte, error) { switch c.seal.StoredKeysSupported() { - case StoredKeysSupportedGeneric: + case vaultseal.StoredKeysSupportedGeneric: if err := c.seal.VerifyRecoveryKey(ctx, combinedKey); err != nil { return nil, errwrap.Wrapf("recovery key verification failed: {{err}}", err) } @@ -2112,15 +2117,19 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, combinedKey []byte) ([] } return storedKeys[0], nil - case StoredKeysSupportedShamirMaster: - testseal := NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("testseal"))) + case vaultseal.StoredKeysSupportedShamirMaster: + testseal := NewDefaultSeal(&vaultseal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("testseal"), + }), + }) testseal.SetCore(c) cfg, err := c.seal.BarrierConfig(ctx) if err != nil { return nil, errwrap.Wrapf("failed to setup test barrier config: {{err}}", err) } testseal.SetCachedBarrierConfig(cfg) - err = testseal.GetAccess().(*shamirseal.ShamirSeal).SetKey(combinedKey) + err = testseal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(combinedKey) if err != nil { return nil, errwrap.Wrapf("failed to setup unseal key: {{err}}", err) } @@ -2133,7 +2142,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, combinedKey []byte) ([] } return storedKeys[0], nil - case StoredKeysNotSupported: + case vaultseal.StoredKeysNotSupported: return combinedKey, nil } return nil, fmt.Errorf("invalid seal") diff --git a/vault/external_tests/api/sys_rekey_ext_test.go b/vault/external_tests/api/sys_rekey_ext_test.go index 34f5df74c9..8f60eab9b0 100644 --- a/vault/external_tests/api/sys_rekey_ext_test.go +++ b/vault/external_tests/api/sys_rekey_ext_test.go @@ -12,6 +12,7 @@ import ( 
"github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical/inmem" "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" ) func TestSysRekey_Verification(t *testing.T) { @@ -43,14 +44,14 @@ func testSysRekey_Verification(t *testing.T, recovery bool, legacyShamir bool) { panic("invalid case") } opts.SealFunc = func() vault.Seal { - return vault.NewTestSeal(t, &vault.TestSealOpts{ - StoredKeys: vault.StoredKeysSupportedGeneric, + return vault.NewTestSeal(t, &seal.TestSealOpts{ + StoredKeys: seal.StoredKeysSupportedGeneric, }) } case legacyShamir: opts.SealFunc = func() vault.Seal { - return vault.NewTestSeal(t, &vault.TestSealOpts{ - StoredKeys: vault.StoredKeysNotSupported, + return vault.NewTestSeal(t, &seal.TestSealOpts{ + StoredKeys: seal.StoredKeysNotSupported, }) } } diff --git a/vault/ha.go b/vault/ha.go index 08157a6a46..3844db2df5 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -10,10 +10,9 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/vault/vault/seal/shamir" - "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/namespace" @@ -23,6 +22,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/seal" "github.com/oklog/run" ) @@ -781,9 +781,9 @@ func (c *Core) reloadShamirKey(ctx context.Context) error { } var shamirKey []byte switch c.seal.StoredKeysSupported() { - case StoredKeysSupportedGeneric: + case seal.StoredKeysSupportedGeneric: return nil - case StoredKeysSupportedShamirMaster: + case seal.StoredKeysSupportedShamirMaster: entry, err := c.barrier.Get(ctx, shamirKekPath) if err != nil { return err @@ -792,14 +792,14 @@ func (c *Core) reloadShamirKey(ctx context.Context) error { return nil } shamirKey = entry.Value - case StoredKeysNotSupported: + case seal.StoredKeysNotSupported: keyring, err := c.barrier.Keyring() if err != nil { return errwrap.Wrapf("failed to update seal access: {{err}}", err) } shamirKey = keyring.masterKey } - return c.seal.GetAccess().(*shamir.ShamirSeal).SetKey(shamirKey) + return c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(shamirKey) } func (c *Core) performKeyUpgrades(ctx context.Context) error { diff --git a/vault/init.go b/vault/init.go index fe0207e89b..c4d0e662ad 100644 --- a/vault/init.go +++ b/vault/init.go @@ -9,14 +9,15 @@ import ( "net/url" "sync/atomic" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/errwrap" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/shamir" - "github.com/hashicorp/vault/vault/seal" - shamirseal "github.com/hashicorp/vault/vault/seal/shamir" ) // InitParams keeps the init function from being littered with too many @@ -145,7 +146,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // which means both that the shares will be different *AND* there would // need to be a way to actually allow fetching of the generated keys by // operators. 
- if c.SealAccess().StoredKeysSupported() == StoredKeysSupportedGeneric { + if c.SealAccess().StoredKeysSupported() == seal.StoredKeysSupportedGeneric { if len(barrierConfig.PGPKeys) > 0 { return nil, fmt.Errorf("PGP keys not supported when storing shares") } @@ -254,7 +255,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes var sealKey []byte var sealKeyShares [][]byte - if barrierConfig.StoredShares == 1 && c.seal.BarrierType() == seal.Shamir { + if barrierConfig.StoredShares == 1 && c.seal.BarrierType() == wrapping.Shamir { sealKey, sealKeyShares, err = c.generateShares(barrierConfig) if err != nil { c.logger.Error("error generating shares", "error", err) @@ -300,9 +301,9 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // If we are storing shares, pop them out of the returned results and push // them through the seal switch c.seal.StoredKeysSupported() { - case StoredKeysSupportedShamirMaster: + case seal.StoredKeysSupportedShamirMaster: keysToStore := [][]byte{barrierKey} - if err := c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(sealKey); err != nil { + if err := c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(sealKey); err != nil { c.logger.Error("failed to set seal key", "error", err) return nil, errwrap.Wrapf("failed to set seal key: {{err}}", err) } @@ -311,7 +312,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes return nil, errwrap.Wrapf("failed to store keys: {{err}}", err) } results.SecretShares = sealKeyShares - case StoredKeysSupportedGeneric: + case seal.StoredKeysSupportedGeneric: keysToStore := [][]byte{barrierKey} if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil { c.logger.Error("failed to store keys", "error", err) @@ -407,7 +408,7 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { c.unsealWithStoredKeysLock.Lock() defer c.unsealWithStoredKeysLock.Unlock() - if c.seal.BarrierType() == seal.Shamir { + if c.seal.BarrierType() == wrapping.Shamir { return nil } diff --git a/vault/init_test.go b/vault/init_test.go index 77715b40c6..cb2ebe9a7e 100644 --- a/vault/init_test.go +++ b/vault/init_test.go @@ -6,10 +6,10 @@ import ( "testing" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical/inmem" - "github.com/hashicorp/vault/vault/seal" ) func TestCore_Init(t *testing.T) { @@ -80,7 +80,7 @@ func testCore_Init_Common(t *testing.T, c *Core, conf *CoreConfig, barrierConf, t.Fatalf("err: %v", err) } - if c.seal.BarrierType() == seal.Shamir && len(res.SecretShares) != barrierConf.SecretShares { + if c.seal.BarrierType() == wrapping.Shamir && len(res.SecretShares) != barrierConf.SecretShares { t.Fatalf("Bad: got\n%#v\nexpected conf matching\n%#v\n", *res, *barrierConf) } if recoveryConf != nil { diff --git a/vault/logical_system_raft.go b/vault/logical_system_raft.go index a00caaa98d..00b79e6dd0 100644 --- a/vault/logical_system_raft.go +++ b/vault/logical_system_raft.go @@ -8,13 +8,13 @@ import ( "strings" proto "github.com/golang/protobuf/proto" + wrapping "github.com/hashicorp/go-kms-wrapping" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" - 
"github.com/hashicorp/vault/vault/seal" ) // raftStoragePaths returns paths for use when raft is the storage mechanism. @@ -192,7 +192,7 @@ func (b *SystemBackend) handleRaftBootstrapChallengeWrite() framework.OperationF } sealAccess := b.Core.seal.GetAccess() - eBlob, err := sealAccess.Encrypt(ctx, uuid) + eBlob, err := sealAccess.Encrypt(ctx, uuid, nil) if err != nil { return nil, err } @@ -337,7 +337,7 @@ func (b *SystemBackend) handleStorageRaftSnapshotWrite(force bool) framework.Ope case err == nil: case strings.Contains(err.Error(), "failed to open the sealed hashes"): switch b.Core.seal.BarrierType() { - case seal.Shamir: + case wrapping.Shamir: return logical.ErrorResponse("could not verify hash file, possibly the snapshot is using a different set of unseal keys; use the snapshot-force API to bypass this check"), logical.ErrInvalidRequest default: return logical.ErrorResponse("could not verify hash file, possibly the snapshot is using a different autoseal key; use the snapshot-force API to bypass this check"), logical.ErrInvalidRequest diff --git a/vault/raft.go b/vault/raft.go index 30f3901c46..75e8eda9a2 100644 --- a/vault/raft.go +++ b/vault/raft.go @@ -17,12 +17,12 @@ import ( proto "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" + wrapping "github.com/hashicorp/go-kms-wrapping" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault/seal" "github.com/mitchellh/mapstructure" "golang.org/x/net/http2" @@ -491,13 +491,14 @@ func (c *Core) raftSnapshotRestoreCallback(grabLock bool, sealNode bool) func(co // The snapshot contained a master key or keyring we couldn't // recover switch c.seal.BarrierType() { - case seal.Shamir: + case wrapping.Shamir: // If we are a shamir seal we can't do anything. Just // seal all nodes. // Seal ourselves c.logger.Info("failed to perform key upgrades, sealing", "error", err) return err + default: // If we are using an auto-unseal we can try to use the seal to // unseal again. 
If the auto-unseal mechanism has changed then @@ -599,7 +600,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderAddr string, tlsConfig return errwrap.Wrapf("error decoding challenge: {{err}}", err) } - eBlob := &physical.EncryptedBlobInfo{} + eBlob := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(challengeRaw, eBlob); err != nil { return errwrap.Wrapf("error decoding challenge: {{err}}", err) } @@ -609,7 +610,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderAddr string, tlsConfig leaderBarrierConfig: &sealConfig, nonVoter: nonVoter, } - if c.seal.BarrierType() == seal.Shamir { + if c.seal.BarrierType() == wrapping.Shamir { c.raftInfo = raftInfo c.seal.SetBarrierConfig(ctx, &sealConfig) return nil @@ -651,7 +652,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderAddr string, tlsConfig // This is used in tests to override the cluster address var UpdateClusterAddrForTests uint32 -func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess seal.Access, raftInfo *raftInformation) error { +func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess *seal.Access, raftInfo *raftInformation) error { if raftInfo.challenge == nil { return errors.New("raft challenge is nil") } @@ -665,7 +666,7 @@ func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess seal.Access, r return errors.New("raft is already initialized") } - plaintext, err := sealAccess.Decrypt(ctx, raftInfo.challenge) + plaintext, err := sealAccess.Decrypt(ctx, raftInfo.challenge, nil) if err != nil { return errwrap.Wrapf("error decrypting challenge: {{err}}", err) } diff --git a/vault/rekey.go b/vault/rekey.go index f6505a4712..cbaca02b6d 100644 --- a/vault/rekey.go +++ b/vault/rekey.go @@ -10,6 +10,8 @@ import ( "net/http" "github.com/hashicorp/errwrap" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/sdk/helper/consts" @@ -18,7 +20,6 @@ import ( "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/shamir" "github.com/hashicorp/vault/vault/seal" - shamirseal "github.com/hashicorp/vault/vault/seal/shamir" ) const ( @@ -169,7 +170,7 @@ func (c *Core) RekeyInit(config *SealConfig, recovery bool) logical.HTTPCodedErr // BarrierRekeyInit is used to initialize the rekey settings for the barrier key func (c *Core) BarrierRekeyInit(config *SealConfig) logical.HTTPCodedError { switch c.seal.BarrierType() { - case seal.Shamir: + case wrapping.Shamir: // As of Vault 1.3 all seals use StoredShares==1. The one exception is // legacy shamir seals, which we can read but not write (by design). 
// So if someone does a rekey, regardless of their intention, we're going @@ -332,7 +333,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) var existingConfig *SealConfig var err error var useRecovery bool // Determines whether recovery key is being used to rekey the master key - if c.seal.StoredKeysSupported() == StoredKeysSupportedGeneric && c.seal.RecoveryKeySupported() { + if c.seal.StoredKeysSupported() == seal.StoredKeysSupportedGeneric && c.seal.RecoveryKeySupported() { existingConfig, err = c.seal.RecoveryConfig(ctx) useRecovery = true } else { @@ -396,11 +397,15 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) c.logger.Error("rekey recovery key verification failed", "error", err) return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("recovery key verification failed: {{err}}", err).Error()) } - case c.seal.BarrierType() == seal.Shamir: - if c.seal.StoredKeysSupported() == StoredKeysSupportedShamirMaster { - testseal := NewDefaultSeal(shamirseal.NewSeal(c.logger.Named("testseal"))) + case c.seal.BarrierType() == wrapping.Shamir: + if c.seal.StoredKeysSupported() == seal.StoredKeysSupportedShamirMaster { + testseal := NewDefaultSeal(&seal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: c.logger.Named("testseal"), + }), + }) testseal.SetCore(c) - err = testseal.GetAccess().(*shamirseal.ShamirSeal).SetKey(recoveredKey) + err = testseal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(recoveredKey) if err != nil { return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to setup unseal key: {{err}}", err).Error()) } @@ -433,7 +438,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) results := &RekeyResult{ Backup: c.barrierRekeyConfig.Backup, } - if c.seal.StoredKeysSupported() != StoredKeysSupportedGeneric { + if c.seal.StoredKeysSupported() != seal.StoredKeysSupportedGeneric { // Set result.SecretShares to the new key itself if only a single key // part is used -- no Shamir split required. if c.barrierRekeyConfig.SecretShares == 1 { @@ -516,7 +521,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) } func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logical.HTTPCodedError { - legacyUpgrade := c.seal.StoredKeysSupported() == StoredKeysNotSupported + legacyUpgrade := c.seal.StoredKeysSupported() == seal.StoredKeysNotSupported if legacyUpgrade { // We won't be able to call SetStoredKeys without setting StoredShares=1. 
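// Sketch, for illustration only (the helper name setShamirKey is hypothetical):
// the recurring replacement made throughout this change. Wherever the code
// previously called seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(key), it
// now type-asserts the go-kms-wrapping AEAD wrapper held by *seal.Access and
// installs the key bytes there, exactly as in the surrounding hunks.
package sketch

import (
	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"

	"github.com/hashicorp/vault/vault/seal"
)

// setShamirKey installs the recovered unseal/seal key bytes into the AEAD
// wrapper so a Shamir-style seal can encrypt and decrypt the barrier keys.
func setShamirKey(access *seal.Access, key []byte) error {
	return access.Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(key)
}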
existingConfig, err := c.seal.BarrierConfig(ctx) @@ -527,8 +532,8 @@ func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logic c.seal.SetCachedBarrierConfig(existingConfig) } - if c.seal.StoredKeysSupported() != StoredKeysSupportedGeneric { - err := c.seal.GetAccess().(*shamirseal.ShamirSeal).SetKey(newSealKey) + if c.seal.StoredKeysSupported() != seal.StoredKeysSupportedGeneric { + err := c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(newSealKey) if err != nil { return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to update barrier seal key: {{err}}", err).Error()) } diff --git a/vault/rekey_test.go b/vault/rekey_test.go index fd3b53efd4..e5661cbe7c 100644 --- a/vault/rekey_test.go +++ b/vault/rekey_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault/seal" ) func TestCore_Rekey_Lifecycle(t *testing.T) { @@ -328,7 +329,7 @@ func TestCore_Rekey_Legacy(t *testing.T) { SecretThreshold: 1, } c, masterKeys, _, root := TestCoreUnsealedWithConfigSealOpts(t, bc, nil, - &TestSealOpts{StoredKeys: StoredKeysNotSupported}) + &seal.TestSealOpts{StoredKeys: seal.StoredKeysNotSupported}) testCore_Rekey_Update_Common(t, c, masterKeys, root, false) } @@ -518,7 +519,7 @@ func TestSysRekey_Verification_Invalid(t *testing.T) { core, _, _, _ := TestCoreUnsealedWithConfigSealOpts(t, &SealConfig{StoredShares: 1, SecretShares: 1, SecretThreshold: 1}, &SealConfig{StoredShares: 1, SecretShares: 1, SecretThreshold: 1}, - &TestSealOpts{StoredKeys: StoredKeysSupportedGeneric}) + &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedGeneric}) err := core.BarrierRekeyInit(&SealConfig{ VerificationRequired: true, diff --git a/vault/seal.go b/vault/seal.go index 8583b2aecf..6697f15123 100644 --- a/vault/seal.go +++ b/vault/seal.go @@ -10,6 +10,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault/seal" @@ -49,35 +50,12 @@ const ( RecoveryTypeShamir = "shamir" ) -type StoredKeysSupport int - -const ( - // The 0 value of StoredKeysSupport is an invalid option - StoredKeysInvalid StoredKeysSupport = iota - StoredKeysNotSupported - StoredKeysSupportedGeneric - StoredKeysSupportedShamirMaster -) - -func (s StoredKeysSupport) String() string { - switch s { - case StoredKeysNotSupported: - return "Old-style Shamir" - case StoredKeysSupportedGeneric: - return "AutoUnseal" - case StoredKeysSupportedShamirMaster: - return "New-style Shamir" - default: - return "Invalid StoredKeys type" - } -} - type Seal interface { SetCore(*Core) Init(context.Context) error Finalize(context.Context) error - StoredKeysSupported() StoredKeysSupport + StoredKeysSupported() seal.StoredKeysSupport SealWrapable() bool SetStoredKeys(context.Context, [][]byte) error GetStoredKeys(context.Context) ([][]byte, error) @@ -96,16 +74,16 @@ type Seal interface { SetRecoveryKey(context.Context, []byte) error VerifyRecoveryKey(context.Context, []byte) error - GetAccess() seal.Access + GetAccess() *seal.Access } type defaultSeal struct { - access seal.Access + access *seal.Access config atomic.Value core *Core } -func NewDefaultSeal(lowLevel seal.Access) Seal { +func NewDefaultSeal(lowLevel *seal.Access) Seal { ret := &defaultSeal{ access: 
lowLevel, } @@ -124,11 +102,11 @@ func (d *defaultSeal) checkCore() error { return nil } -func (d *defaultSeal) GetAccess() seal.Access { +func (d *defaultSeal) GetAccess() *seal.Access { return d.access } -func (d *defaultSeal) SetAccess(access seal.Access) { +func (d *defaultSeal) SetAccess(access *seal.Access) { d.access = access } @@ -145,22 +123,22 @@ func (d *defaultSeal) Finalize(ctx context.Context) error { } func (d *defaultSeal) BarrierType() string { - return seal.Shamir + return wrapping.Shamir } -func (d *defaultSeal) StoredKeysSupported() StoredKeysSupport { +func (d *defaultSeal) StoredKeysSupported() seal.StoredKeysSupport { isLegacy, err := d.LegacySeal() if err != nil { if d.core != nil && d.core.logger != nil { d.core.logger.Error("no seal config found, can't determine if legacy or new-style shamir") } - return StoredKeysInvalid + return seal.StoredKeysInvalid } switch { case isLegacy: - return StoredKeysNotSupported + return seal.StoredKeysNotSupported default: - return StoredKeysSupportedShamirMaster + return seal.StoredKeysSupportedShamirMaster } } @@ -438,7 +416,7 @@ func (s *SealConfig) Clone() *SealConfig { return ret } -func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor seal.Encryptor, keys [][]byte) error { +func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *seal.Access, keys [][]byte) error { if keys == nil { return fmt.Errorf("keys were nil") } @@ -452,7 +430,7 @@ func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor se } // Encrypt and marshal the keys - blobInfo, err := encryptor.Encrypt(ctx, buf) + blobInfo, err := encryptor.Encrypt(ctx, buf, nil) if err != nil { return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err) } @@ -475,7 +453,7 @@ func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor se return nil } -func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor seal.Encryptor) ([][]byte, error) { +func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor *seal.Access) ([][]byte, error) { pe, err := storage.Get(ctx, StoredBarrierKeysPath) if err != nil { return nil, errwrap.Wrapf("failed to fetch stored keys: {{err}}", err) @@ -487,12 +465,12 @@ func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor sea return nil, nil } - blobInfo := &physical.EncryptedBlobInfo{} + blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { return nil, errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) } - pt, err := encryptor.Decrypt(ctx, blobInfo) + pt, err := encryptor.Decrypt(ctx, blobInfo, nil) if err != nil { return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err) } diff --git a/vault/seal/alicloudkms/alicloudkms_acc_test.go b/vault/seal/alicloudkms/alicloudkms_acc_test.go deleted file mode 100644 index fe24ee8f72..0000000000 --- a/vault/seal/alicloudkms/alicloudkms_acc_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package alicloudkms - -import ( - "context" - "os" - "reflect" - "testing" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -// This test executes real calls. The calls themselves should be free, -// but the KMS key used is generally not free. Alibaba doesn't publish -// the price but it can be assumed to be around $1/month because that's -// what AWS charges for the same. 
-// -// To run this test, the following env variables need to be set: -// - VAULT_ALICLOUDKMS_SEAL_KEY_ID -// - ALICLOUD_REGION -// - ALICLOUD_ACCESS_KEY -// - ALICLOUD_SECRET_KEY -func TestAccAliCloudKMSSeal_Lifecycle(t *testing.T) { - if os.Getenv("VAULT_ACC") == "" { - t.SkipNow() - } - - s := NewSeal(logging.NewVaultLogger(log.Trace)) - _, err := s.SetConfig(nil) - if err != nil { - t.Fatalf("err : %s", err) - } - - input := []byte("foo") - swi, err := s.Encrypt(context.Background(), input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - pt, err := s.Decrypt(context.Background(), swi) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - if !reflect.DeepEqual(input, pt) { - t.Fatalf("expected %s, got %s", input, pt) - } -} diff --git a/vault/seal/alicloudkms/alicloudkms_test.go b/vault/seal/alicloudkms/alicloudkms_test.go deleted file mode 100644 index 8c9f3541df..0000000000 --- a/vault/seal/alicloudkms/alicloudkms_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package alicloudkms - -import ( - "context" - "encoding/base64" - "errors" - "os" - "reflect" - "testing" - - "github.com/aliyun/alibaba-cloud-sdk-go/services/kms" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -const aliCloudTestKeyID = "foo" - -func TestAliCloudKMSSeal(t *testing.T) { - s := NewSeal(logging.NewVaultLogger(log.Trace)) - s.client = &mockAliCloudKMSSealClient{ - keyID: aliCloudTestKeyID, - } - - if _, err := s.SetConfig(nil); err == nil { - t.Fatal("expected error when AliCloudKMSSeal key ID is not provided") - } - - // Set the key - if err := os.Setenv(EnvAliCloudKMSSealKeyID, aliCloudTestKeyID); err != nil { - t.Fatal(err) - } - defer func() { - if err := os.Unsetenv(EnvAliCloudKMSSealKeyID); err != nil { - t.Fatal(err) - } - }() - if _, err := s.SetConfig(nil); err != nil { - t.Fatal(err) - } -} - -func TestAliCloudKMSSeal_Lifecycle(t *testing.T) { - s := NewSeal(logging.NewVaultLogger(log.Trace)) - s.client = &mockAliCloudKMSSealClient{ - keyID: aliCloudTestKeyID, - } - - if err := os.Setenv(EnvAliCloudKMSSealKeyID, aliCloudTestKeyID); err != nil { - t.Fatal(err) - } - defer func() { - if err := os.Unsetenv(EnvAliCloudKMSSealKeyID); err != nil { - t.Fatal(err) - } - }() - if _, err := s.SetConfig(nil); err != nil { - t.Fatal(err) - } - - // Test Encrypt and Decrypt calls - input := []byte("foo") - swi, err := s.Encrypt(context.Background(), input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - pt, err := s.Decrypt(context.Background(), swi) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - if !reflect.DeepEqual(input, pt) { - t.Fatalf("expected %s, got %s", input, pt) - } -} - -type mockAliCloudKMSSealClient struct { - keyID string -} - -// Encrypt is a mocked call that returns a base64 encoded string. -func (m *mockAliCloudKMSSealClient) Encrypt(request *kms.EncryptRequest) (response *kms.EncryptResponse, err error) { - m.keyID = request.KeyId - - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(request.Plaintext))) - base64.StdEncoding.Encode(encoded, []byte(request.Plaintext)) - - output := kms.CreateEncryptResponse() - output.CiphertextBlob = string(encoded) - output.KeyId = request.KeyId - return output, nil -} - -// Decrypt is a mocked call that returns a decoded base64 string. 
-func (m *mockAliCloudKMSSealClient) Decrypt(request *kms.DecryptRequest) (response *kms.DecryptResponse, err error) { - decLen := base64.StdEncoding.DecodedLen(len(request.CiphertextBlob)) - decoded := make([]byte, decLen) - len, err := base64.StdEncoding.Decode(decoded, []byte(request.CiphertextBlob)) - if err != nil { - return nil, err - } - - if len < decLen { - decoded = decoded[:len] - } - - output := kms.CreateDecryptResponse() - output.KeyId = m.keyID - output.Plaintext = string(decoded) - return output, nil -} - -// DescribeKey is a mocked call that returns the keyID. -func (m *mockAliCloudKMSSealClient) DescribeKey(request *kms.DescribeKeyRequest) (response *kms.DescribeKeyResponse, err error) { - if m.keyID == "" { - return nil, errors.New("key not found") - } - output := kms.CreateDescribeKeyResponse() - output.KeyMetadata = kms.KeyMetadata{ - KeyId: m.keyID, - } - return output, nil -} diff --git a/vault/seal/awskms/awskms_test.go b/vault/seal/awskms/awskms_test.go deleted file mode 100644 index 1747cc6969..0000000000 --- a/vault/seal/awskms/awskms_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package awskms - -import ( - "context" - "os" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -func TestAWSKMSSeal(t *testing.T) { - s := NewSeal(logging.NewVaultLogger(log.Trace)) - s.client = &mockAWSKMSSealClient{ - keyID: aws.String(awsTestKeyID), - } - - _, err := s.SetConfig(nil) - if err == nil { - t.Fatal("expected error when AWSKMSSeal key ID is not provided") - } - - // Set the key - oldKeyID := os.Getenv(EnvAWSKMSSealKeyID) - os.Setenv(EnvAWSKMSSealKeyID, awsTestKeyID) - defer os.Setenv(EnvAWSKMSSealKeyID, oldKeyID) - _, err = s.SetConfig(nil) - if err != nil { - t.Fatal(err) - } -} - -func TestAWSKMSSeal_Lifecycle(t *testing.T) { - if os.Getenv(EnvAWSKMSSealKeyID) == "" { - t.SkipNow() - } - s := NewSeal(logging.NewVaultLogger(log.Trace)) - s.client = &mockAWSKMSSealClient{ - keyID: aws.String(awsTestKeyID), - } - oldKeyID := os.Getenv(EnvAWSKMSSealKeyID) - os.Setenv(EnvAWSKMSSealKeyID, awsTestKeyID) - defer os.Setenv(EnvAWSKMSSealKeyID, oldKeyID) - testEncryptionRoundTrip(t, s) -} - -// This test executes real calls. The calls themselves should be free, -// but the KMS key used is generally not free. AWS charges about $1/month -// per key. 
-// -// To run this test, the following env variables need to be set: -// - VAULT_AWSKMS_SEAL_KEY_ID -// - AWS_REGION -// - AWS_ACCESS_KEY_ID -// - AWS_SECRET_ACCESS_KEY -func TestAccAWSKMSSeal_Lifecycle(t *testing.T) { - if os.Getenv(EnvAWSKMSSealKeyID) == "" { - t.SkipNow() - } - s := NewSeal(logging.NewVaultLogger(log.Trace)) - testEncryptionRoundTrip(t, s) -} - -func testEncryptionRoundTrip(t *testing.T, seal *AWSKMSSeal) { - seal.SetConfig(nil) - input := []byte("foo") - swi, err := seal.Encrypt(context.Background(), input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - pt, err := seal.Decrypt(context.Background(), swi) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - if !reflect.DeepEqual(input, pt) { - t.Fatalf("expected %s, got %s", input, pt) - } -} - -func TestAWSKMSSeal_custom_endpoint(t *testing.T) { - customEndpoint := "https://custom.endpoint" - customEndpoint2 := "https://custom.endpoint.2" - endpointENV := "AWS_KMS_ENDPOINT" - - // unset at end of test - os.Setenv(EnvAWSKMSSealKeyID, awsTestKeyID) - defer func() { - if err := os.Unsetenv(EnvAWSKMSSealKeyID); err != nil { - t.Fatal(err) - } - }() - - cfg := make(map[string]string) - cfg["endpoint"] = customEndpoint - - testCases := []struct { - Title string - Env string - Config map[string]string - Expected *string - }{ - { - // Default will have nil for the config endpoint, and be looked up - // dynamically by the SDK - Title: "Default", - }, - { - Title: "Environment", - Env: customEndpoint, - Expected: aws.String(customEndpoint), - }, - { - Title: "Config", - Config: cfg, - Expected: aws.String(customEndpoint), - }, - { - // Expect environment to take precedence over configuration - Title: "Env-Config", - Env: customEndpoint2, - Config: cfg, - Expected: aws.String(customEndpoint2), - }, - } - - for _, tc := range testCases { - t.Run(tc.Title, func(t *testing.T) { - s := NewSeal(logging.NewVaultLogger(log.Trace)) - - s.client = &mockAWSKMSSealClient{ - keyID: aws.String(awsTestKeyID), - } - - if tc.Env != "" { - if err := os.Setenv(endpointENV, tc.Env); err != nil { - t.Fatal(err) - } - } - - // cfg starts as nil, and takes a test case value if given. 
If not, - // SetConfig is called with nil and creates it's own config - var cfg map[string]string - if tc.Config != nil { - cfg = tc.Config - } - if _, err := s.SetConfig(cfg); err != nil { - t.Fatalf("error setting config: %s", err) - } - - // call getAWSKMSClient() to get the configured client and verify it's - // endpoint - k, err := s.getAWSKMSClient() - if err != nil { - t.Fatal(err) - } - - if tc.Expected == nil && k.Config.Endpoint != nil { - t.Fatalf("Expected nil endpoint, got: (%s)", *k.Config.Endpoint) - } - - if tc.Expected != nil { - if k.Config.Endpoint == nil { - t.Fatal("expected custom endpoint, but config was nil") - } - if *k.Config.Endpoint != *tc.Expected { - t.Fatalf("expected custom endpoint (%s), got: (%s)", *tc.Expected, *k.Config.Endpoint) - } - } - - // clear endpoint env after each test - if err := os.Unsetenv(endpointENV); err != nil { - t.Fatal(err) - } - }) - } - -} diff --git a/vault/seal/azurekeyvault/azurekeyvault_acc_test.go b/vault/seal/azurekeyvault/azurekeyvault_acc_test.go deleted file mode 100644 index ef5a7a8b54..0000000000 --- a/vault/seal/azurekeyvault/azurekeyvault_acc_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package azurekeyvault - -import ( - "context" - "os" - "reflect" - "testing" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -func TestAzureKeyVault_SetConfig(t *testing.T) { - if os.Getenv("VAULT_ACC") == "" { - t.SkipNow() - } - - seal := NewSeal(logging.NewVaultLogger(log.Trace)) - - tenantID := os.Getenv("AZURE_TENANT_ID") - os.Unsetenv("AZURE_TENANT_ID") - - // Attempt to set config, expect failure due to missing config - _, err := seal.SetConfig(nil) - if err == nil { - t.Fatal("expected error when Azure Key Vault config values are not provided") - } - - os.Setenv("AZURE_TENANT_ID", tenantID) - - _, err = seal.SetConfig(nil) - if err != nil { - t.Fatal(err) - } -} - -func TestAzureKeyVault_Lifecycle(t *testing.T) { - if os.Getenv("VAULT_ACC") == "" { - t.SkipNow() - } - - s := NewSeal(logging.NewVaultLogger(log.Trace)) - _, err := s.SetConfig(nil) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - // Test Encrypt and Decrypt calls - input := []byte("foo") - swi, err := s.Encrypt(context.Background(), input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - pt, err := s.Decrypt(context.Background(), swi) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - if !reflect.DeepEqual(input, pt) { - t.Fatalf("expected %s, got %s", input, pt) - } -} diff --git a/vault/seal/envelope.go b/vault/seal/envelope.go index cdd6fcb8a5..6558fc54cd 100644 --- a/vault/seal/envelope.go +++ b/vault/seal/envelope.go @@ -1,72 +1,36 @@ package seal import ( - "crypto/aes" - "crypto/cipher" - "errors" + "sync" "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" - uuid "github.com/hashicorp/go-uuid" + wrapping "github.com/hashicorp/go-kms-wrapping" ) -type Envelope struct{} - -type EnvelopeInfo struct { - Ciphertext []byte - Key []byte - IV []byte +type Envelope struct { + envelope *wrapping.Envelope + once sync.Once } func NewEnvelope() *Envelope { return &Envelope{} } -func (e *Envelope) Encrypt(plaintext []byte) (*EnvelopeInfo, error) { +func (e *Envelope) init() { + e.envelope = new(wrapping.Envelope) +} + +func (e *Envelope) Encrypt(plaintext, aad []byte) (*wrapping.EnvelopeInfo, error) { defer metrics.MeasureSince([]string{"seal", "envelope", "encrypt"}, time.Now()) + e.once.Do(e.init) - // Generate DEK - key, err := uuid.GenerateRandomBytes(32) - if err != 
nil { - return nil, err - } - iv, err := uuid.GenerateRandomBytes(12) - if err != nil { - return nil, err - } - aead, err := e.aeadEncrypter(key) - if err != nil { - return nil, err - } - return &EnvelopeInfo{ - Ciphertext: aead.Seal(nil, iv, plaintext, nil), - Key: key, - IV: iv, - }, nil + return e.envelope.Encrypt(plaintext, aad) } -func (e *Envelope) Decrypt(data *EnvelopeInfo) ([]byte, error) { +func (e *Envelope) Decrypt(data *wrapping.EnvelopeInfo, aad []byte) ([]byte, error) { defer metrics.MeasureSince([]string{"seal", "envelope", "decrypt"}, time.Now()) + e.once.Do(e.init) - aead, err := e.aeadEncrypter(data.Key) - if err != nil { - return nil, err - } - return aead.Open(nil, data.IV, data.Ciphertext, nil) -} - -func (e *Envelope) aeadEncrypter(key []byte) (cipher.AEAD, error) { - aesCipher, err := aes.NewCipher(key) - if err != nil { - return nil, errwrap.Wrapf("failed to create cipher: {{err}}", err) - } - - // Create the GCM mode AEAD - gcm, err := cipher.NewGCM(aesCipher) - if err != nil { - return nil, errors.New("failed to initialize GCM mode") - } - - return gcm, nil + return e.envelope.Decrypt(data, aad) } diff --git a/vault/seal/envelope_test.go b/vault/seal/envelope_test.go index 974b056336..3600ff4517 100644 --- a/vault/seal/envelope_test.go +++ b/vault/seal/envelope_test.go @@ -7,12 +7,12 @@ import ( func TestEnvelope(t *testing.T) { input := []byte("test") - env, err := NewEnvelope().Encrypt(input) + env, err := NewEnvelope().Encrypt(input, nil) if err != nil { t.Fatal(err) } - output, err := NewEnvelope().Decrypt(env) + output, err := NewEnvelope().Decrypt(env, nil) if err != nil { t.Fatal(err) } diff --git a/vault/seal/gcpckms/gcpckms_acc_test.go b/vault/seal/gcpckms/gcpckms_acc_test.go deleted file mode 100644 index 1781a69388..0000000000 --- a/vault/seal/gcpckms/gcpckms_acc_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package gcpckms - -import ( - "os" - "reflect" - "testing" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" - context "golang.org/x/net/context" -) - -const ( - // These values need to match the values from the hc-value-testing project - gcpckmsTestProjectID = "hc-vault-testing" - gcpckmsTestLocationID = "global" - gcpckmsTestKeyRing = "vault-test-keyring" - gcpckmsTestCryptoKey = "vault-test-key" -) - -func TestGCPCKMSSeal(t *testing.T) { - // Do an error check before env vars are set - s := NewSeal(logging.NewVaultLogger(log.Trace)) - _, err := s.SetConfig(nil) - if err == nil { - t.Fatal("expected error when GCPCKMSSeal required values are not provided") - } - - // Now test for cases where CKMS values are provided - checkAndSetEnvVars(t) - - configCases := map[string]map[string]string{ - "env_var": nil, - "config": map[string]string{ - "credentials": os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"), - }, - } - - for name, config := range configCases { - t.Run(name, func(t *testing.T) { - s := NewSeal(logging.NewVaultLogger(log.Trace)) - _, err := s.SetConfig(config) - if err != nil { - t.Fatalf("error setting seal config: %v", err) - } - }) - } -} - -func TestGCPCKMSSeal_Lifecycle(t *testing.T) { - checkAndSetEnvVars(t) - - s := NewSeal(logging.NewVaultLogger(log.Trace)) - _, err := s.SetConfig(nil) - if err != nil { - t.Fatalf("error setting seal config: %v", err) - } - - // Test Encrypt and Decrypt calls - input := []byte("foo") - swi, err := s.Encrypt(context.Background(), input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - pt, err := s.Decrypt(context.Background(), swi) - if err != nil { - 
t.Fatalf("err: %s", err.Error()) - } - - if !reflect.DeepEqual(input, pt) { - t.Fatalf("expected %s, got %s", input, pt) - } -} - -// checkAndSetEnvVars check and sets the required env vars. It will skip tests that are -// not ran as acceptance tests since they require calling to external APIs. -func checkAndSetEnvVars(t *testing.T) { - t.Helper() - - // Skip tests if we are not running acceptance tests - if os.Getenv("VAULT_ACC") == "" { - t.SkipNow() - } - - if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" && os.Getenv(EnvGCPCKMSSealCredsPath) == "" { - t.Fatal("unable to get GCP credentials via environment variables") - } - - if os.Getenv(EnvGCPCKMSSealProject) == "" { - os.Setenv(EnvGCPCKMSSealProject, gcpckmsTestProjectID) - } - - if os.Getenv(EnvGCPCKMSSealLocation) == "" { - os.Setenv(EnvGCPCKMSSealLocation, gcpckmsTestLocationID) - } - - if os.Getenv(EnvGCPCKMSSealKeyRing) == "" { - os.Setenv(EnvGCPCKMSSealKeyRing, gcpckmsTestKeyRing) - } - - if os.Getenv(EnvGCPCKMSSealCryptoKey) == "" { - os.Setenv(EnvGCPCKMSSealCryptoKey, gcpckmsTestCryptoKey) - } -} diff --git a/vault/seal/ocikms/ocikms_test.go b/vault/seal/ocikms/ocikms_test.go deleted file mode 100644 index fdb28056de..0000000000 --- a/vault/seal/ocikms/ocikms_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright © 2019, Oracle and/or its affiliates. -package ocikms - -import ( - "os" - "reflect" - "testing" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" - "golang.org/x/net/context" -) - -/* -* To run these tests, ensure you setup: -* 1. OCI SDK with your credentials. Refer to here: -* https://docs.cloud.oracle.com/iaas/Content/API/Concepts/sdkconfig.htm -* 2. Go to ocikms folder: vault/vault/seal/ocikms -* VAULT_OCIKMS_SEAL_KEY_ID="your-kms-key" VAULT_OCIKMS_CRYPTO_ENDPOINT="your-kms-crypto-endpoint" go test - */ - -func TestOCIKMSSeal(t *testing.T) { - initSeal(t) -} - -func TestOCIKMSSeal_LifeCycle(t *testing.T) { - s := initSeal(t) - - // Test Encrypt and Decrypt calls - input := []byte("foo") - swi, err := s.Encrypt(context.Background(), input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - pt, err := s.Decrypt(context.Background(), swi) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - if !reflect.DeepEqual(input, pt) { - t.Fatalf("expected %s, got %s", input, pt) - } -} - -func initSeal(t *testing.T) *OCIKMSSeal { - // Skip tests if we are not running acceptance tests - if os.Getenv("VAULT_ACC") == "" { - t.SkipNow() - } - s := NewSeal(logging.NewVaultLogger(log.Trace)) - _, err := s.SetConfig(nil) - if err == nil { - t.Fatal("expected error when OCIKMSSeal required values are not provided") - } - - mockConfig := map[string]string{ - - "auth_type_api_key": "true", - } - - _, err = s.SetConfig(mockConfig) - if err != nil { - t.Fatalf("error setting seal config: %v", err) - } - - return s -} diff --git a/vault/seal/seal.go b/vault/seal/seal.go index fd1deb3f53..0df1cf0bf4 100644 --- a/vault/seal/seal.go +++ b/vault/seal/seal.go @@ -2,39 +2,83 @@ package seal import ( "context" + "time" - "github.com/hashicorp/vault/sdk/physical" + metrics "github.com/armon/go-metrics" + wrapping "github.com/hashicorp/go-kms-wrapping" ) +type StoredKeysSupport int + const ( - Shamir = "shamir" - PKCS11 = "pkcs11" - AliCloudKMS = "alicloudkms" - AWSKMS = "awskms" - GCPCKMS = "gcpckms" - AzureKeyVault = "azurekeyvault" - OCIKMS = "ocikms" - Transit = "transit" - Test = "test-auto" - - // HSMAutoDeprecated is a deprecated seal type prior to 0.9.0. 
- // It is still referenced in certain code paths for upgrade purporses - HSMAutoDeprecated = "hsm-auto" + // The 0 value of StoredKeysSupport is an invalid option + StoredKeysInvalid StoredKeysSupport = iota + StoredKeysNotSupported + StoredKeysSupportedGeneric + StoredKeysSupportedShamirMaster ) -type Encryptor interface { - Encrypt(context.Context, []byte) (*physical.EncryptedBlobInfo, error) - Decrypt(context.Context, *physical.EncryptedBlobInfo) ([]byte, error) +func (s StoredKeysSupport) String() string { + switch s { + case StoredKeysNotSupported: + return "Old-style Shamir" + case StoredKeysSupportedGeneric: + return "AutoUnseal" + case StoredKeysSupportedShamirMaster: + return "New-style Shamir" + default: + return "Invalid StoredKeys type" + } } // Access is the embedded implementation of autoSeal that contains logic // specific to encrypting and decrypting data, or in this case keys. -type Access interface { - SealType() string - KeyID() string - - Init(context.Context) error - Finalize(context.Context) error - - Encryptor +type Access struct { + wrapping.Wrapper + OverriddenType string +} + +func (a *Access) SetType(t string) { + a.OverriddenType = t +} + +func (a *Access) Type() string { + if a.OverriddenType != "" { + return a.OverriddenType + } + return a.Wrapper.Type() +} + +func (a *Access) Encrypt(ctx context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now) + metrics.MeasureSince([]string{"seal", a.Wrapper.Type(), "encrypt", "time"}, now) + + if err != nil { + metrics.IncrCounter([]string{"seal", "encrypt", "error"}, 1) + metrics.IncrCounter([]string{"seal", a.Wrapper.Type(), "encrypt", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"seal", "encrypt"}, 1) + metrics.IncrCounter([]string{"seal", a.Wrapper.Type(), "encrypt"}, 1) + + return a.Wrapper.Encrypt(ctx, plaintext, aad) +} + +func (a *Access) Decrypt(ctx context.Context, data *wrapping.EncryptedBlobInfo, aad []byte) (pt []byte, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now) + metrics.MeasureSince([]string{"seal", a.Wrapper.Type(), "decrypt", "time"}, now) + + if err != nil { + metrics.IncrCounter([]string{"seal", "decrypt", "error"}, 1) + metrics.IncrCounter([]string{"seal", a.Wrapper.Type(), "decrypt", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"seal", "decrypt"}, 1) + metrics.IncrCounter([]string{"seal", a.Wrapper.Type(), "decrypt"}, 1) + + return a.Wrapper.Decrypt(ctx, data, aad) } diff --git a/vault/seal/seal_testing.go b/vault/seal/seal_testing.go index ccd37b2f72..e4f4db3cea 100644 --- a/vault/seal/seal_testing.go +++ b/vault/seal/seal_testing.go @@ -1,89 +1,24 @@ package seal import ( - "context" - - "github.com/hashicorp/vault/helper/xor" - "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" ) -type TestSeal struct { - Type string - secret []byte - keyId string +type TestSealOpts struct { + Logger hclog.Logger + StoredKeys StoredKeysSupport + Secret []byte + Name string } -var _ Access = (*TestSeal)(nil) - -func NewTestSeal(secret []byte) *TestSeal { - return &TestSeal{ - Type: Test, - secret: secret, - keyId: "static-key", - } -} - -func (s *TestSeal) Init(_ context.Context) error { - return nil -} - -func (t *TestSeal) Finalize(_ context.Context) error { - return nil -} - -func (t *TestSeal) SealType() string 
{ - return t.Type -} - -func (t *TestSeal) KeyID() string { - return t.keyId -} - -func (t *TestSeal) SetKeyID(k string) { - t.keyId = k -} - -func (t *TestSeal) Encrypt(_ context.Context, plaintext []byte) (*physical.EncryptedBlobInfo, error) { - ct, err := t.obscureBytes(plaintext) - if err != nil { - return nil, err +func NewTestSeal(opts *TestSealOpts) *Access { + if opts == nil { + opts = new(TestSealOpts) } - return &physical.EncryptedBlobInfo{ - Ciphertext: ct, - KeyInfo: &physical.SealKeyInfo{ - KeyID: t.KeyID(), - }, - }, nil -} - -func (t *TestSeal) Decrypt(_ context.Context, dwi *physical.EncryptedBlobInfo) ([]byte, error) { - return t.obscureBytes(dwi.Ciphertext) -} - -// obscureBytes is a helper to simulate "encryption/decryption" -// on protected values. -func (t *TestSeal) obscureBytes(in []byte) ([]byte, error) { - out := make([]byte, len(in)) - - if len(t.secret) != 0 { - // make sure they are the same length - localSecret := make([]byte, len(in)) - copy(localSecret, t.secret) - - var err error - - out, err = xor.XORBytes(in, localSecret) - if err != nil { - return nil, err - } - - } else { - // if there is no secret, simply reverse the string - for i := 0; i < len(in); i++ { - out[i] = in[len(in)-1-i] - } + return &Access{ + Wrapper: wrapping.NewTestWrapper(opts.Secret), + OverriddenType: opts.Name, } - - return out, nil } diff --git a/vault/seal/shamir/shamir.go b/vault/seal/shamir/shamir.go deleted file mode 100644 index 5054cc1d34..0000000000 --- a/vault/seal/shamir/shamir.go +++ /dev/null @@ -1,114 +0,0 @@ -package shamir - -import ( - "context" - "crypto/aes" - "crypto/cipher" - "errors" - "fmt" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" -) - -// ShamirSeal implements the seal.Access interface for Shamir unseal -type ShamirSeal struct { - logger log.Logger - key []byte - aead cipher.AEAD -} - -// Ensure that we are implementing AutoSealAccess -var _ seal.Access = (*ShamirSeal)(nil) - -// NewSeal creates a new ShamirSeal with the provided logger -func NewSeal(logger log.Logger) *ShamirSeal { - seal := &ShamirSeal{ - logger: logger, - } - return seal -} - -func (s *ShamirSeal) GetKey() []byte { - return s.key -} - -func (s *ShamirSeal) SetKey(key []byte) error { - aesCipher, err := aes.NewCipher(key) - if err != nil { - return err - } - - aead, err := cipher.NewGCM(aesCipher) - if err != nil { - return err - } - - s.key = key - s.aead = aead - return nil -} - -// Init is called during core.Initialize. No-op at the moment. -func (s *ShamirSeal) Init(_ context.Context) error { - return nil -} - -// Finalize is called during shutdown. This is a no-op since -// ShamirSeal doesn't require any cleanup. -func (s *ShamirSeal) Finalize(_ context.Context) error { - return nil -} - -// SealType returns the seal type for this particular seal implementation. -func (s *ShamirSeal) SealType() string { - return seal.Shamir -} - -// KeyID returns the last known key id. -func (s *ShamirSeal) KeyID() string { - return "" -} - -// Encrypt is used to encrypt the plaintext using the aead held by the seal. 
-func (s *ShamirSeal) Encrypt(_ context.Context, plaintext []byte) (*physical.EncryptedBlobInfo, error) { - if plaintext == nil { - return nil, fmt.Errorf("given plaintext for encryption is nil") - } - - if s.aead == nil { - return nil, errors.New("aead is not configured in the seal") - } - - iv, err := uuid.GenerateRandomBytes(12) - if err != nil { - return nil, err - } - - ciphertext := s.aead.Seal(nil, iv, plaintext, nil) - - return &physical.EncryptedBlobInfo{ - Ciphertext: append(iv, ciphertext...), - }, nil -} - -func (s *ShamirSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) ([]byte, error) { - if in == nil { - return nil, fmt.Errorf("given plaintext for encryption is nil") - } - - if s.aead == nil { - return nil, errors.New("aead is not configured in the seal") - } - - iv, ciphertext := in.Ciphertext[:12], in.Ciphertext[12:] - - plaintext, err := s.aead.Open(nil, iv, ciphertext, nil) - if err != nil { - return nil, err - } - - return plaintext, nil -} diff --git a/vault/seal/transit/transit.go b/vault/seal/transit/transit.go deleted file mode 100644 index eface946a3..0000000000 --- a/vault/seal/transit/transit.go +++ /dev/null @@ -1,131 +0,0 @@ -package transit - -import ( - "context" - "errors" - "strings" - "sync/atomic" - "time" - - "github.com/armon/go-metrics" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" -) - -// Seal is a seal that leverages Vault's Transit secret -// engine -type Seal struct { - logger log.Logger - client transitClientEncryptor - currentKeyID *atomic.Value -} - -var _ seal.Access = (*Seal)(nil) - -// NewSeal creates a new transit seal -func NewSeal(logger log.Logger) *Seal { - s := &Seal{ - logger: logger.ResetNamed("seal-transit"), - currentKeyID: new(atomic.Value), - } - s.currentKeyID.Store("") - return s -} - -// SetConfig processes the config info from the server config -func (s *Seal) SetConfig(config map[string]string) (map[string]string, error) { - client, sealInfo, err := newTransitClient(s.logger, config) - if err != nil { - return nil, err - } - s.client = client - - // Send a value to test the seal and to set the current key id - if _, err := s.Encrypt(context.Background(), []byte("a")); err != nil { - client.Close() - return nil, err - } - - return sealInfo, nil -} - -// Init is called during core.Initialize -func (s *Seal) Init(_ context.Context) error { - return nil -} - -// Finalize is called during shutdown -func (s *Seal) Finalize(_ context.Context) error { - s.client.Close() - return nil -} - -// SealType returns the seal type for this particular seal implementation. -func (s *Seal) SealType() string { - return seal.Transit -} - -// KeyID returns the last known key id. 
-func (s *Seal) KeyID() string { - return s.currentKeyID.Load().(string) -} - -// Encrypt is used to encrypt using Vaults Transit engine -func (s *Seal) Encrypt(_ context.Context, plaintext []byte) (blob *physical.EncryptedBlobInfo, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "transit", "encrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "encrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "transit", "encrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "encrypt"}, 1) - metrics.IncrCounter([]string{"seal", "transit", "encrypt"}, 1) - - ciphertext, err := s.client.Encrypt(plaintext) - if err != nil { - return nil, err - } - - splitKey := strings.Split(string(ciphertext), ":") - if len(splitKey) != 3 { - return nil, errors.New("invalid ciphertext returned") - } - keyID := splitKey[1] - s.currentKeyID.Store(keyID) - - ret := &physical.EncryptedBlobInfo{ - Ciphertext: ciphertext, - KeyInfo: &physical.SealKeyInfo{ - KeyID: keyID, - }, - } - return ret, nil -} - -// Decrypt is used to decrypt the ciphertext -func (s *Seal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) (pt []byte, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "transit", "decrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "decrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "transit", "decrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "decrypt"}, 1) - metrics.IncrCounter([]string{"seal", "transit", "decrypt"}, 1) - - plaintext, err := s.client.Decrypt(in.Ciphertext) - if err != nil { - return nil, err - } - return plaintext, nil -} diff --git a/vault/seal/transit/transit_test.go b/vault/seal/transit/transit_test.go deleted file mode 100644 index e0c7a38708..0000000000 --- a/vault/seal/transit/transit_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package transit - -import ( - "context" - "errors" - "fmt" - "reflect" - "strings" - "testing" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" -) - -type testTransitClient struct { - keyID string - seal seal.Access -} - -func newTestTransitClient(keyID string) *testTransitClient { - return &testTransitClient{ - keyID: keyID, - seal: seal.NewTestSeal(nil), - } -} - -func (m *testTransitClient) Close() {} - -func (m *testTransitClient) Encrypt(plaintext []byte) ([]byte, error) { - v, err := m.seal.Encrypt(context.Background(), plaintext) - if err != nil { - return nil, err - } - - return []byte(fmt.Sprintf("v1:%s:%s", m.keyID, string(v.Ciphertext))), nil -} - -func (m *testTransitClient) Decrypt(ciphertext []byte) ([]byte, error) { - splitKey := strings.Split(string(ciphertext), ":") - if len(splitKey) != 3 { - return nil, errors.New("invalid ciphertext returned") - } - - data := &physical.EncryptedBlobInfo{ - Ciphertext: []byte(splitKey[2]), - } - v, err := m.seal.Decrypt(context.Background(), data) - if err != nil { - return nil, err - } - - return v, nil -} - -func TestTransitSeal_Lifecycle(t *testing.T) { - s := NewSeal(logging.NewVaultLogger(log.Trace)) - - keyID := "test-key" - s.client = newTestTransitClient(keyID) - - // Test Encrypt and Decrypt calls - input := []byte("foo") - swi, err := 
s.Encrypt(context.Background(), input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - pt, err := s.Decrypt(context.Background(), swi) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - if !reflect.DeepEqual(input, pt) { - t.Fatalf("expected %s, got %s", input, pt) - } - - if s.KeyID() != keyID { - t.Fatalf("key id does not match: expected %s, got %s", keyID, s.KeyID()) - } -} diff --git a/vault/seal_access.go b/vault/seal_access.go index 5f44433c34..050702320e 100644 --- a/vault/seal_access.go +++ b/vault/seal_access.go @@ -2,6 +2,8 @@ package vault import ( "context" + + "github.com/hashicorp/vault/vault/seal" ) // SealAccess is a wrapper around Seal that exposes accessor methods @@ -15,7 +17,7 @@ func NewSealAccess(seal Seal) *SealAccess { return &SealAccess{seal: seal} } -func (s *SealAccess) StoredKeysSupported() StoredKeysSupport { +func (s *SealAccess) StoredKeysSupported() seal.StoredKeysSupport { return s.seal.StoredKeysSupported() } @@ -45,3 +47,7 @@ func (s *SealAccess) ClearCaches(ctx context.Context) { s.seal.SetRecoveryConfig(ctx, nil) } } + +func (s *SealAccess) GetAccess() *seal.Access { + return s.seal.GetAccess() +} diff --git a/vault/seal_autoseal.go b/vault/seal_autoseal.go index cb8af84086..6ff171ec5c 100644 --- a/vault/seal_autoseal.go +++ b/vault/seal_autoseal.go @@ -10,6 +10,7 @@ import ( proto "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault/seal" ) @@ -22,7 +23,7 @@ var barrierTypeUpgradeCheck = func(_ string, _ *SealConfig) {} // decrypting stored keys via an underlying AutoSealAccess implementation, as // well as logic related to recovery keys and barrier config. type autoSeal struct { - seal.Access + *seal.Access barrierConfig atomic.Value recoveryConfig atomic.Value @@ -33,7 +34,7 @@ type autoSeal struct { // Ensure we are implementing the Seal interface var _ Seal = (*autoSeal)(nil) -func NewAutoSeal(lowLevel seal.Access) *autoSeal { +func NewAutoSeal(lowLevel *seal.Access) *autoSeal { ret := &autoSeal{ Access: lowLevel, } @@ -46,7 +47,7 @@ func (d *autoSeal) SealWrapable() bool { return true } -func (d *autoSeal) GetAccess() seal.Access { +func (d *autoSeal) GetAccess() *seal.Access { return d.Access } @@ -74,11 +75,11 @@ func (d *autoSeal) Finalize(ctx context.Context) error { } func (d *autoSeal) BarrierType() string { - return d.SealType() + return d.Type() } -func (d *autoSeal) StoredKeysSupported() StoredKeysSupport { - return StoredKeysSupportedGeneric +func (d *autoSeal) StoredKeysSupported() seal.StoredKeysSupport { + return seal.StoredKeysSupportedGeneric } func (d *autoSeal) RecoveryKeySupported() bool { @@ -88,13 +89,13 @@ func (d *autoSeal) RecoveryKeySupported() bool { // SetStoredKeys uses the autoSeal.Access.Encrypts method to wrap the keys. The stored entry // does not need to be seal wrapped in this case. func (d *autoSeal) SetStoredKeys(ctx context.Context, keys [][]byte) error { - return writeStoredKeys(ctx, d.core.physical, d, keys) + return writeStoredKeys(ctx, d.core.physical, d.Access, keys) } // GetStoredKeys retrieves the key shares by unwrapping the encrypted key using the // autoseal. 
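// Sketch, for illustration only (roundTrip is a hypothetical helper): the new
// call shape used by autoSeal above. Access.Encrypt and Access.Decrypt now take
// an additional authenticated data argument (nil where none is needed) and
// exchange wrapping.EncryptedBlobInfo values, which the callers proto-marshal
// before writing to the physical backend.
package sketch

import (
	"context"

	"github.com/hashicorp/vault/vault/seal"
)

func roundTrip(ctx context.Context, access *seal.Access, plaintext []byte) ([]byte, error) {
	// Encrypt returns a *wrapping.EncryptedBlobInfo describing the ciphertext.
	blob, err := access.Encrypt(ctx, plaintext, nil)
	if err != nil {
		return nil, err
	}
	// Decrypt accepts the same blob (plus optional AAD) and returns plaintext.
	return access.Decrypt(ctx, blob, nil)
}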
func (d *autoSeal) GetStoredKeys(ctx context.Context) ([][]byte, error) { - return readStoredKeys(ctx, d.core.physical, d) + return readStoredKeys(ctx, d.core.physical, d.Access) } func (d *autoSeal) upgradeStoredKeys(ctx context.Context) error { @@ -106,7 +107,7 @@ func (d *autoSeal) upgradeStoredKeys(ctx context.Context) error { return fmt.Errorf("no stored keys found") } - blobInfo := &physical.EncryptedBlobInfo{} + blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { return errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) } @@ -114,7 +115,7 @@ func (d *autoSeal) upgradeStoredKeys(ctx context.Context) error { if blobInfo.KeyInfo != nil && blobInfo.KeyInfo.KeyID != d.Access.KeyID() { d.logger.Info("upgrading stored keys") - pt, err := d.Decrypt(ctx, blobInfo) + pt, err := d.Decrypt(ctx, blobInfo, nil) if err != nil { return errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err) } @@ -139,7 +140,7 @@ func (d *autoSeal) upgradeStoredKeys(ctx context.Context) error { func (d *autoSeal) UpgradeKeys(ctx context.Context) error { // Many of the seals update their keys to the latest KeyID when Encrypt // is called. - if _, err := d.Encrypt(ctx, []byte("a")); err != nil { + if _, err := d.Encrypt(ctx, []byte("a"), nil); err != nil { return err } @@ -384,7 +385,7 @@ func (d *autoSeal) SetRecoveryKey(ctx context.Context, key []byte) error { } // Encrypt and marshal the keys - blobInfo, err := d.Encrypt(ctx, key) + blobInfo, err := d.Encrypt(ctx, key, nil) if err != nil { return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err) } @@ -422,12 +423,12 @@ func (d *autoSeal) getRecoveryKeyInternal(ctx context.Context) ([]byte, error) { return nil, fmt.Errorf("no recovery key found") } - blobInfo := &physical.EncryptedBlobInfo{} + blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { return nil, errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err) } - pt, err := d.Decrypt(ctx, blobInfo) + pt, err := d.Decrypt(ctx, blobInfo, nil) if err != nil { return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err) } @@ -444,7 +445,7 @@ func (d *autoSeal) upgradeRecoveryKey(ctx context.Context) error { return fmt.Errorf("no recovery key found") } - blobInfo := &physical.EncryptedBlobInfo{} + blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(pe.Value, blobInfo); err != nil { return errwrap.Wrapf("failed to proto decode recovery key: {{err}}", err) } @@ -452,7 +453,7 @@ func (d *autoSeal) upgradeRecoveryKey(ctx context.Context) error { if blobInfo.KeyInfo != nil && blobInfo.KeyInfo.KeyID != d.Access.KeyID() { d.logger.Info("upgrading recovery key") - pt, err := d.Decrypt(ctx, blobInfo) + pt, err := d.Decrypt(ctx, blobInfo, nil) if err != nil { return errwrap.Wrapf("failed to decrypt encrypted recovery key: {{err}}", err) diff --git a/vault/seal_autoseal_test.go b/vault/seal_autoseal_test.go index 5352eec3ea..9c98270553 100644 --- a/vault/seal_autoseal_test.go +++ b/vault/seal_autoseal_test.go @@ -7,6 +7,7 @@ import ( "testing" proto "github.com/golang/protobuf/proto" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault/seal" ) @@ -64,7 +65,7 @@ func TestAutoSeal_UpgradeKeys(t *testing.T) { var encKeys []string changeKey := func(key string) { encKeys = append(encKeys, key) - testSeal.SetKeyID(key) + 
testSeal.Wrapper.(*wrapping.TestWrapper).SetKeyID(key) } // Set initial encryption key. @@ -123,7 +124,7 @@ func TestAutoSeal_UpgradeKeys(t *testing.T) { // in encKeys. Iterate over each phyEntry and verify it was // encrypted with its corresponding key in encKeys. for i, phyEntry := range phyEntries { - blobInfo := &physical.EncryptedBlobInfo{} + blobInfo := &wrapping.EncryptedBlobInfo{} if err := proto.Unmarshal(phyEntry.Value, blobInfo); err != nil { t.Errorf("phyKey = %s: failed to proto decode stored keys: %s", phyKey, err) } diff --git a/vault/seal_testing.go b/vault/seal_testing.go index 1437914631..5c1baccb40 100644 --- a/vault/seal_testing.go +++ b/vault/seal_testing.go @@ -3,33 +3,28 @@ package vault import ( "context" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/vault/seal" + vaultseal "github.com/hashicorp/vault/vault/seal" testing "github.com/mitchellh/go-testing-interface" ) -type TestSealOpts struct { - Logger log.Logger - StoredKeys StoredKeysSupport - Secret []byte -} - func TestCoreUnsealedWithConfigs(t testing.T, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) { t.Helper() - opts := &TestSealOpts{} + opts := &seal.TestSealOpts{} if recoveryConf == nil { - opts.StoredKeys = StoredKeysSupportedShamirMaster + opts.StoredKeys = seal.StoredKeysSupportedShamirMaster } return TestCoreUnsealedWithConfigSealOpts(t, barrierConf, recoveryConf, opts) } -func TestCoreUnsealedWithConfigSealOpts(t testing.T, barrierConf, recoveryConf *SealConfig, sealOpts *TestSealOpts) (*Core, [][]byte, [][]byte, string) { +func TestCoreUnsealedWithConfigSealOpts(t testing.T, barrierConf, recoveryConf *SealConfig, sealOpts *seal.TestSealOpts) (*Core, [][]byte, [][]byte, string) { t.Helper() seal := NewTestSeal(t, sealOpts) core := TestCoreWithSeal(t, seal, false) result, err := core.Initialize(context.Background(), &InitParams{ BarrierConfig: barrierConf, RecoveryConfig: recoveryConf, - LegacyShamirSeal: sealOpts.StoredKeys == StoredKeysNotSupported, + LegacyShamirSeal: sealOpts.StoredKeys == vaultseal.StoredKeysNotSupported, }) if err != nil { t.Fatalf("err: %s", err) diff --git a/vault/seal_testing_util.go b/vault/seal_testing_util.go index b919f4a0d5..44e172ae60 100644 --- a/vault/seal_testing_util.go +++ b/vault/seal_testing_util.go @@ -2,24 +2,29 @@ package vault import ( "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/vault/seal" - shamirseal "github.com/hashicorp/vault/vault/seal/shamir" testing "github.com/mitchellh/go-testing-interface" ) -func NewTestSeal(t testing.T, opts *TestSealOpts) Seal { +func NewTestSeal(t testing.T, opts *seal.TestSealOpts) Seal { t.Helper() if opts == nil { - opts = &TestSealOpts{} + opts = &seal.TestSealOpts{} } if opts.Logger == nil { opts.Logger = logging.NewVaultLogger(hclog.Debug) } switch opts.StoredKeys { - case StoredKeysSupportedShamirMaster: - newSeal := NewDefaultSeal(shamirseal.NewSeal(opts.Logger)) + case seal.StoredKeysSupportedShamirMaster: + newSeal := NewDefaultSeal(&seal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: opts.Logger, + }), + }) // Need StoredShares set or this will look like a legacy shamir seal. 
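[Editor's note] The seal hunks above and below migrate Vault's seal onto a go-kms-wrapping backed *seal.Access. As a reviewer aid, here is a minimal sketch of the new surface, using only identifiers visible in this diff (seal.NewTestSeal, seal.TestSealOpts, Access.Encrypt/Decrypt with a trailing AAD argument, Access.KeyID); the exact TestSealOpts fields and their defaults are assumptions, not confirmed by this diff.

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/vault/seal"
)

func main() {
	// seal.NewTestSeal returns a *seal.Access wrapping a test go-kms-wrapping
	// wrapper, matching the NewAutoSeal(seal.NewTestSeal(opts)) call site below.
	access := seal.NewTestSeal(&seal.TestSealOpts{})

	ctx := context.Background()

	// Encrypt gained a trailing additional-authenticated-data argument (nil
	// everywhere in this diff) and returns a *wrapping.EncryptedBlobInfo.
	blob, err := access.Encrypt(ctx, []byte("stored key material"), nil)
	if err != nil {
		panic(err)
	}

	// Decrypt mirrors the seal_autoseal.go call sites: blob info in, plaintext out.
	pt, err := access.Decrypt(ctx, blob, nil)
	if err != nil {
		panic(err)
	}

	fmt.Printf("round-tripped %q under key ID %q\n", pt, access.KeyID())
}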
newSeal.SetCachedBarrierConfig(&SealConfig{ StoredShares: 1, @@ -27,8 +32,12 @@ func NewTestSeal(t testing.T, opts *TestSealOpts) Seal { SecretShares: 1, }) return newSeal - case StoredKeysNotSupported: - newSeal := NewDefaultSeal(shamirseal.NewSeal(opts.Logger)) + case seal.StoredKeysNotSupported: + newSeal := NewDefaultSeal(&seal.Access{ + Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{ + Logger: opts.Logger, + }), + }) newSeal.SetCachedBarrierConfig(&SealConfig{ StoredShares: 0, SecretThreshold: 1, @@ -36,6 +45,6 @@ func NewTestSeal(t testing.T, opts *TestSealOpts) Seal { }) return newSeal default: - return NewAutoSeal(seal.NewTestSeal(opts.Secret)) + return NewAutoSeal(seal.NewTestSeal(opts)) } } diff --git a/vault/sealunwrapper.go b/vault/sealunwrapper.go index 461db11e4f..f3efa25fee 100644 --- a/vault/sealunwrapper.go +++ b/vault/sealunwrapper.go @@ -9,6 +9,7 @@ import ( proto "github.com/golang/protobuf/proto" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/physical" ) @@ -69,7 +70,7 @@ func (d *sealUnwrapper) Get(ctx context.Context, key string) (*physical.Entry, e } var performUnwrap bool - se := &physical.EncryptedBlobInfo{} + se := &wrapping.EncryptedBlobInfo{} // If the value ends in our canary value, try to decode the bytes. eLen := len(entry.Value) if eLen > 0 && entry.Value[eLen-1] == 's' { @@ -106,7 +107,7 @@ func (d *sealUnwrapper) Get(ctx context.Context, key string) (*physical.Entry, e } performUnwrap = false - se = &physical.EncryptedBlobInfo{} + se = &wrapping.EncryptedBlobInfo{} // If the value ends in our canary value, try to decode the bytes. eLen = len(entry.Value) if eLen > 0 && entry.Value[eLen-1] == 's' { diff --git a/vault/sealunwrapper_test.go b/vault/sealunwrapper_test.go index d379135293..9489260eaf 100644 --- a/vault/sealunwrapper_test.go +++ b/vault/sealunwrapper_test.go @@ -10,6 +10,7 @@ import ( proto "github.com/golang/protobuf/proto" log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/physical/inmem" ) @@ -56,7 +57,7 @@ func performTestSealUnwrapper(t *testing.T, phys physical.Backend, logger log.Lo // Save the original for comparison later origBytes := make([]byte, len(entry.Value)) copy(origBytes, entry.Value) - se := &physical.EncryptedBlobInfo{ + se := &wrapping.EncryptedBlobInfo{ Ciphertext: entry.Value, } seb, err := proto.Marshal(se) diff --git a/vault/testing.go b/vault/testing.go index 7f56e402bc..d485399942 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -31,6 +31,7 @@ import ( hclog "github.com/hashicorp/go-hclog" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/vault/seal" "github.com/mitchellh/copystructure" "golang.org/x/crypto/ed25519" @@ -267,7 +268,7 @@ func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, handler http.Handl } switch core.seal.StoredKeysSupported() { - case StoredKeysNotSupported: + case seal.StoredKeysNotSupported: barrierConfig.StoredShares = 0 default: barrierConfig.StoredShares = 1 @@ -282,7 +283,7 @@ func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, handler http.Handl BarrierConfig: barrierConfig, RecoveryConfig: recoveryConfig, } - if core.seal.StoredKeysSupported() == StoredKeysNotSupported { + if core.seal.StoredKeysSupported() == seal.StoredKeysNotSupported { 
initParams.LegacyShamirSeal = true } result, err := core.Initialize(context.Background(), initParams) diff --git a/vault/token_store_test.go b/vault/token_store_test.go index 57a0e9b159..31fa151f44 100644 --- a/vault/token_store_test.go +++ b/vault/token_store_test.go @@ -213,7 +213,7 @@ func TestTokenStore_Salting(t *testing.T) { t.Fatalf("expected sha2-256 hmac; got sha1 hash") } - nsCtx := namespace.ContextWithNamespace(context.Background(), &namespace.Namespace{"testid", "ns1"}) + nsCtx := namespace.ContextWithNamespace(context.Background(), &namespace.Namespace{ID: "testid", Path: "ns1"}) saltedID, err = ts.SaltID(nsCtx, "foo") if err != nil { t.Fatal(err) diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml deleted file mode 100644 index ee417bbe6b..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.11.x - -go_import_path: contrib.go.opencensus.io/exporter/ocagent - -before_script: - - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any - - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any - -script: - - go build ./... # Ensure dependency updates don't break build - - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - - go vet ./... - - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled - - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules. - - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md deleted file mode 100644 index 0786fdf434..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md +++ /dev/null @@ -1,24 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult [GitHub Help] for more -information on using pull requests. - -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md deleted file mode 100644 index 3b9e908f59..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# OpenCensus Agent Go Exporter - -[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url] - - -This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter. 
-OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from -OpenCensus Library, export them to other backends and possibly push configurations back to -Library. See more details on [OC-Agent Readme][OCAgentReadme]. - -Note: This is an experimental repository and is likely to get backwards-incompatible changes. -Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo]. - -## Installation - -```bash -$ go get -u contrib.go.opencensus.io/exporter/ocagent -``` - -## Usage - -```go -import ( - "context" - "fmt" - "log" - "time" - - "contrib.go.opencensus.io/exporter/ocagent" - "go.opencensus.io/trace" -) - -func Example() { - exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name")) - if err != nil { - log.Fatalf("Failed to create the agent exporter: %v", err) - } - defer exp.Stop() - - // Now register it as a trace exporter. - trace.RegisterExporter(exp) - - // Then use the OpenCensus tracing library, like we normally would. - ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example") - defer span.End() - - for i := 0; i < 10; i++ { - _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i)) - <-time.After(6 * time.Millisecond) - iSpan.End() - } -} -``` - -[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto -[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go -[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg -[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent -[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master -[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent - diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go deleted file mode 100644 index 297e44b6e7..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "math/rand" - "time" -) - -var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) - -// retries function fn upto n times, if fn returns an error lest it returns nil early. 
-// It applies exponential backoff in units of (1< 0 { - ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) - } - traceExporter, err := traceSvcClient.Export(ctx) - if err != nil { - return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) - } - - firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{ - Node: node, - Resource: ae.resource, - } - if err := traceExporter.Send(firstTraceMessage); err != nil { - return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) - } - - ae.mu.Lock() - ae.traceExporter = traceExporter - ae.mu.Unlock() - - // Initiate the config service by sending over node identifier info. - configStream, err := traceSvcClient.Config(context.Background()) - if err != nil { - return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) - } - firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node} - if err := configStream.Send(firstCfgMessage); err != nil { - return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) - } - - // In the background, handle trace configurations that are beamed down - // by the agent, but also reply to it with the applied configuration. - go ae.handleConfigStreaming(configStream) - - return nil -} - -func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { - metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) - metricsExporter, err := metricsSvcClient.Export(context.Background()) - if err != nil { - return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) - } - // Initiate the metrics service by sending over the first message just containing the Node and Resource. - firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ - Node: node, - Resource: ae.resource, - } - if err := metricsExporter.Send(firstMetricsMessage); err != nil { - return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) - } - - ae.mu.Lock() - ae.metricsExporter = metricsExporter - ae.mu.Unlock() - - // With that we are good to go and can start sending metrics - return nil -} - -func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { - addr := ae.prepareAgentAddress() - var dialOpts []grpc.DialOption - if ae.clientTransportCredentials != nil { - dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) - } else if ae.canDialInsecure { - dialOpts = append(dialOpts, grpc.WithInsecure()) - } - if ae.compressor != "" { - dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) - } - dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) - - ctx := context.Background() - if len(ae.headers) > 0 { - ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) - } - return grpc.DialContext(ctx, addr, dialOpts...) -} - -func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { - // Note: We haven't yet implemented configuration sending so we - // should NOT be changing connection states within this function for now. - for { - recv, err := configStream.Recv() - if err != nil { - // TODO: Check if this is a transient error or exponential backoff-able. 
- return err - } - cfg := recv.Config - if cfg == nil { - continue - } - - // Otherwise now apply the trace configuration sent down from the agent - if psamp := cfg.GetProbabilitySampler(); psamp != nil { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) - } else if csamp := cfg.GetConstantSampler(); csamp != nil { - alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON - if alwaysSample { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - } else { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) - } - } else { // TODO: Add the rate limiting sampler here - } - - // Then finally send back to upstream the newly applied configuration - err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) - if err != nil { - return err - } - } -} - -// Stop shuts down all the connections and resources -// related to the exporter. -func (ae *Exporter) Stop() error { - ae.mu.RLock() - cc := ae.grpcClientConn - started := ae.started - stopped := ae.stopped - ae.mu.RUnlock() - - if !started { - return errNotStarted - } - if stopped { - // TODO: tell the user that we've already stopped, so perhaps a sentinel error? - return nil - } - - ae.Flush() - - // Now close the underlying gRPC connection. - var err error - if cc != nil { - err = cc.Close() - } - - // At this point we can change the state variables: started and stopped - ae.mu.Lock() - ae.started = false - ae.stopped = true - ae.mu.Unlock() - close(ae.stopCh) - - // Ensure that the backgroundConnector returns - <-ae.backgroundConnectionDoneCh - - return err -} - -func (ae *Exporter) ExportSpan(sd *trace.SpanData) { - if sd == nil { - return - } - _ = ae.traceBundler.Add(sd, 1) -} - -func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { - if batch == nil || len(batch.Spans) == 0 { - return nil - } - - select { - case <-ae.stopCh: - return errStopped - - default: - if !ae.connected() { - return errNoConnection - } - - ae.senderMu.Lock() - err := ae.traceExporter.Send(batch) - ae.senderMu.Unlock() - if err != nil { - ae.setStateDisconnected() - return err - } - return nil - } -} - -func (ae *Exporter) ExportView(vd *view.Data) { - if vd == nil { - return - } - _ = ae.viewDataBundler.Add(vd, 1) -} - -func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { - if len(sdl) == 0 { - return nil - } - protoSpans := make([]*tracepb.Span, 0, len(sdl)) - for _, sd := range sdl { - if sd != nil { - protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) - } - } - return protoSpans -} - -func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { - select { - case <-ae.stopCh: - return - - default: - if !ae.connected() { - return - } - - protoSpans := ocSpanDataToPbSpans(sdl) - if len(protoSpans) == 0 { - return - } - ae.senderMu.Lock() - err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ - Spans: protoSpans, - }) - ae.senderMu.Unlock() - if err != nil { - ae.setStateDisconnected() - } - } -} - -func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric { - if len(vdl) == 0 { - return nil - } - metrics := make([]*metricspb.Metric, 0, len(vdl)) - for _, vd := range vdl { - if vd != nil { - vmetric, err := viewDataToMetric(vd) - // TODO: (@odeke-em) somehow report this error, if it is non-nil. 
- if err == nil && vmetric != nil { - metrics = append(metrics, vmetric) - } - } - } - return metrics -} - -func (ae *Exporter) uploadViewData(vdl []*view.Data) { - select { - case <-ae.stopCh: - return - - default: - if !ae.connected() { - return - } - - protoMetrics := ocViewDataToPbMetrics(vdl) - if len(protoMetrics) == 0 { - return - } - err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{ - Metrics: protoMetrics, - // TODO:(@odeke-em) - // a) Figure out how to derive a Node from the environment - // b) Figure out how to derive a Resource from the environment - // or better letting users of the exporter configure it. - }) - if err != nil { - ae.setStateDisconnected() - } - } -} - -func (ae *Exporter) Flush() { - ae.traceBundler.Flush() - ae.viewDataBundler.Flush() -} - -func resourceProtoFromEnv() *resourcepb.Resource { - rs, _ := resource.FromEnv(context.Background()) - if rs == nil { - return nil - } - - rprs := &resourcepb.Resource{ - Type: rs.Type, - } - if rs.Labels != nil { - rprs.Labels = make(map[string]string) - for k, v := range rs.Labels { - rprs.Labels[k] = v - } - } - return rprs -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go deleted file mode 100644 index 3e05ae8b30..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "time" - - "google.golang.org/grpc/credentials" -) - -const ( - DefaultAgentPort uint16 = 55678 - DefaultAgentHost string = "localhost" -) - -type ExporterOption interface { - withExporter(e *Exporter) -} - -type insecureGrpcConnection int - -var _ ExporterOption = (*insecureGrpcConnection)(nil) - -func (igc *insecureGrpcConnection) withExporter(e *Exporter) { - e.canDialInsecure = true -} - -// WithInsecure disables client transport security for the exporter's gRPC connection -// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure -// does. Note, by default, client security is required unless WithInsecure is used. -func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } - -type addressSetter string - -func (as addressSetter) withExporter(e *Exporter) { - e.agentAddress = string(as) -} - -var _ ExporterOption = (*addressSetter)(nil) - -// WithAddress allows one to set the address that the exporter will -// connect to the agent on. If unset, it will instead try to use -// connect to DefaultAgentHost:DefaultAgentPort -func WithAddress(addr string) ExporterOption { - return addressSetter(addr) -} - -type serviceNameSetter string - -func (sns serviceNameSetter) withExporter(e *Exporter) { - e.serviceName = string(sns) -} - -var _ ExporterOption = (*serviceNameSetter)(nil) - -// WithServiceName allows one to set/override the service name -// that the exporter will report to the agent. 
-func WithServiceName(serviceName string) ExporterOption { - return serviceNameSetter(serviceName) -} - -type reconnectionPeriod time.Duration - -func (rp reconnectionPeriod) withExporter(e *Exporter) { - e.reconnectionPeriod = time.Duration(rp) -} - -func WithReconnectionPeriod(rp time.Duration) ExporterOption { - return reconnectionPeriod(rp) -} - -type compressorSetter string - -func (c compressorSetter) withExporter(e *Exporter) { - e.compressor = string(c) -} - -// UseCompressor will set the compressor for the gRPC client to use when sending requests. -// It is the responsibility of the caller to ensure that the compressor set has been registered -// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some -// compressors auto-register on import, such as gzip, which can be registered by calling -// `import _ "google.golang.org/grpc/encoding/gzip"` -func UseCompressor(compressorName string) ExporterOption { - return compressorSetter(compressorName) -} - -type headerSetter map[string]string - -func (h headerSetter) withExporter(e *Exporter) { - e.headers = map[string]string(h) -} - -// WithHeaders will send the provided headers when the gRPC stream connection -// is instantiated -func WithHeaders(headers map[string]string) ExporterOption { - return headerSetter(headers) -} - -type clientCredentials struct { - credentials.TransportCredentials -} - -var _ ExporterOption = (*clientCredentials)(nil) - -// WithTLSCredentials allows the connection to use TLS credentials -// when talking to the server. It takes in grpc.TransportCredentials instead -// of say a Certificate file or a tls.Certificate, because the retrieving -// these credentials can be done in many ways e.g. plain file, in code tls.Config -// or by certificate rotation, so it is up to the caller to decide what to use. -func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { - return &clientCredentials{TransportCredentials: creds} -} - -func (cc *clientCredentials) withExporter(e *Exporter) { - e.clientTransportCredentials = cc.TransportCredentials -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go deleted file mode 100644 index 983ebe7b70..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ocagent - -import ( - "math" - "time" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/tracestate" - - tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - maxAnnotationEventsPerSpan = 32 - maxMessageEventsPerSpan = 128 -) - -func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { - if sd == nil { - return nil - } - var namePtr *tracepb.TruncatableString - if sd.Name != "" { - namePtr = &tracepb.TruncatableString{Value: sd.Name} - } - return &tracepb.Span{ - TraceId: sd.TraceID[:], - SpanId: sd.SpanID[:], - ParentSpanId: sd.ParentSpanID[:], - Status: ocStatusToProtoStatus(sd.Status), - StartTime: timeToTimestamp(sd.StartTime), - EndTime: timeToTimestamp(sd.EndTime), - Links: ocLinksToProtoLinks(sd.Links), - Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), - Name: namePtr, - Attributes: ocAttributesToProtoAttributes(sd.Attributes), - TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents), - Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), - } -} - -var blankStatus trace.Status - -func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { - if status == blankStatus { - return nil - } - return &tracepb.Status{ - Code: status.Code, - Message: status.Message, - } -} - -func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { - if len(links) == 0 { - return nil - } - - sl := make([]*tracepb.Span_Link, 0, len(links)) - for _, ocLink := range links { - // This redefinition is necessary to prevent ocLink.*ID[:] copies - // being reused -- in short we need a new ocLink per iteration. - ocLink := ocLink - - sl = append(sl, &tracepb.Span_Link{ - TraceId: ocLink.TraceID[:], - SpanId: ocLink.SpanID[:], - Type: ocLinkTypeToProtoLinkType(ocLink.Type), - }) - } - - return &tracepb.Span_Links{ - Link: sl, - } -} - -func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { - switch oct { - case trace.LinkTypeChild: - return tracepb.Span_Link_CHILD_LINKED_SPAN - case trace.LinkTypeParent: - return tracepb.Span_Link_PARENT_LINKED_SPAN - default: - return tracepb.Span_Link_TYPE_UNSPECIFIED - } -} - -func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { - if len(attrs) == 0 { - return nil - } - outMap := make(map[string]*tracepb.AttributeValue) - for k, v := range attrs { - switch v := v.(type) { - case bool: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} - - case int: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} - - case int64: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} - - case string: - outMap[k] = &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{ - StringValue: &tracepb.TruncatableString{Value: v}, - }, - } - } - } - return &tracepb.Span_Attributes{ - AttributeMap: outMap, - } -} - -// This code is mostly copied from -// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 -func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents { - if len(as) == 0 && len(es) == 0 { - return nil - } - - timeEvents := &tracepb.Span_TimeEvents{} - var annotations, droppedAnnotationsCount int - var messageEvents, droppedMessageEventsCount int - - // Transform annotations - for i, a := range as { - if annotations >= maxAnnotationEventsPerSpan { - 
droppedAnnotationsCount = len(as) - i - break - } - annotations++ - timeEvents.TimeEvent = append(timeEvents.TimeEvent, - &tracepb.Span_TimeEvent{ - Time: timeToTimestamp(a.Time), - Value: transformAnnotationToTimeEvent(&a), - }, - ) - } - - // Transform message events - for i, e := range es { - if messageEvents >= maxMessageEventsPerSpan { - droppedMessageEventsCount = len(es) - i - break - } - messageEvents++ - timeEvents.TimeEvent = append(timeEvents.TimeEvent, - &tracepb.Span_TimeEvent{ - Time: timeToTimestamp(e.Time), - Value: transformMessageEventToTimeEvent(&e), - }, - ) - } - - // Process dropped counter - timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) - timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) - - return timeEvents -} - -func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ { - return &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: &tracepb.TruncatableString{Value: a.Message}, - Attributes: ocAttributesToProtoAttributes(a.Attributes), - }, - } -} - -func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ { - return &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), - Id: uint64(e.MessageID), - UncompressedSize: uint64(e.UncompressedByteSize), - CompressedSize: uint64(e.CompressedByteSize), - }, - } -} - -// clip32 clips an int to the range of an int32. -func clip32(x int) int32 { - if x < math.MinInt32 { - return math.MinInt32 - } - if x > math.MaxInt32 { - return math.MaxInt32 - } - return int32(x) -} - -func timeToTimestamp(t time.Time) *timestamp.Timestamp { - nanoTime := t.UnixNano() - return ×tamp.Timestamp{ - Seconds: nanoTime / 1e9, - Nanos: int32(nanoTime % 1e9), - } -} - -func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { - switch kind { - case trace.SpanKindClient: - return tracepb.Span_CLIENT - case trace.SpanKindServer: - return tracepb.Span_SERVER - default: - return tracepb.Span_SPAN_KIND_UNSPECIFIED - } -} - -func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { - if ts == nil { - return nil - } - return &tracepb.Span_Tracestate{ - Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), - } -} - -func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { - protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) - for _, entry := range entries { - protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ - Key: entry.Key, - Value: entry.Value, - }) - } - return protoEntries -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go deleted file mode 100644 index 43f18dec19..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "errors" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/golang/protobuf/ptypes/timestamp" - - metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" -) - -var ( - errNilMeasure = errors.New("expecting a non-nil stats.Measure") - errNilView = errors.New("expecting a non-nil view.View") - errNilViewData = errors.New("expecting a non-nil view.Data") -) - -func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) { - if vd == nil { - return nil, errNilViewData - } - - descriptor, err := viewToMetricDescriptor(vd.View) - if err != nil { - return nil, err - } - - timeseries, err := viewDataToTimeseries(vd) - if err != nil { - return nil, err - } - - metric := &metricspb.Metric{ - MetricDescriptor: descriptor, - Timeseries: timeseries, - } - return metric, nil -} - -func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) { - if v == nil { - return nil, errNilView - } - if v.Measure == nil { - return nil, errNilMeasure - } - - desc := &metricspb.MetricDescriptor{ - Name: stringOrCall(v.Name, v.Measure.Name), - Description: stringOrCall(v.Description, v.Measure.Description), - Unit: v.Measure.Unit(), - Type: aggregationToMetricDescriptorType(v), - LabelKeys: tagKeysToLabelKeys(v.TagKeys), - } - return desc, nil -} - -func stringOrCall(first string, call func() string) string { - if first != "" { - return first - } - return call() -} - -type measureType uint - -const ( - measureUnknown measureType = iota - measureInt64 - measureFloat64 -) - -func measureTypeFromMeasure(m stats.Measure) measureType { - switch m.(type) { - default: - return measureUnknown - case *stats.Float64Measure: - return measureFloat64 - case *stats.Int64Measure: - return measureInt64 - } -} - -func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type { - if v == nil || v.Aggregation == nil { - return metricspb.MetricDescriptor_UNSPECIFIED - } - if v.Measure == nil { - return metricspb.MetricDescriptor_UNSPECIFIED - } - - switch v.Aggregation.Type { - case view.AggTypeCount: - // Cumulative on int64 - return metricspb.MetricDescriptor_CUMULATIVE_INT64 - - case view.AggTypeDistribution: - // Cumulative types - return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION - - case view.AggTypeLastValue: - // Gauge types - switch measureTypeFromMeasure(v.Measure) { - case measureFloat64: - return metricspb.MetricDescriptor_GAUGE_DOUBLE - case measureInt64: - return metricspb.MetricDescriptor_GAUGE_INT64 - } - - case view.AggTypeSum: - // Cumulative types - switch measureTypeFromMeasure(v.Measure) { - case measureFloat64: - return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE - case measureInt64: - return metricspb.MetricDescriptor_CUMULATIVE_INT64 - } - } - - // For all other cases, return unspecified. 
- return metricspb.MetricDescriptor_UNSPECIFIED -} - -func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey { - labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys)) - for _, tagKey := range tagKeys { - labelKeys = append(labelKeys, &metricspb.LabelKey{ - Key: tagKey.Name(), - }) - } - return labelKeys -} - -func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) { - if vd == nil || len(vd.Rows) == 0 { - return nil, nil - } - - // Given that view.Data only contains Start, End - // the timestamps for all the row data will be the exact same - // per aggregation. However, the values will differ. - // Each row has its own tags. - startTimestamp := timeToProtoTimestamp(vd.Start) - endTimestamp := timeToProtoTimestamp(vd.End) - - mType := measureTypeFromMeasure(vd.View.Measure) - timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows)) - // It is imperative that the ordering of "LabelValues" matches those - // of the Label keys in the metric descriptor. - for _, row := range vd.Rows { - labelValues := labelValuesFromTags(row.Tags) - point := rowToPoint(vd.View, row, endTimestamp, mType) - timeseries = append(timeseries, &metricspb.TimeSeries{ - StartTimestamp: startTimestamp, - LabelValues: labelValues, - Points: []*metricspb.Point{point}, - }) - } - - if len(timeseries) == 0 { - return nil, nil - } - - return timeseries, nil -} - -func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp { - unixNano := t.UnixNano() - return ×tamp.Timestamp{ - Seconds: int64(unixNano / 1e9), - Nanos: int32(unixNano % 1e9), - } -} - -func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point { - pt := &metricspb.Point{ - Timestamp: endTimestamp, - } - - switch data := row.Data.(type) { - case *view.CountData: - pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value} - - case *view.DistributionData: - pt.Value = &metricspb.Point_DistributionValue{ - DistributionValue: &metricspb.DistributionValue{ - Count: data.Count, - Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count - // TODO: Add Exemplar - Buckets: bucketsToProtoBuckets(data.CountPerBucket), - BucketOptions: &metricspb.DistributionValue_BucketOptions{ - Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ - Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ - Bounds: v.Aggregation.Buckets, - }, - }, - }, - SumOfSquaredDeviation: data.SumOfSquaredDev, - }} - - case *view.LastValueData: - setPointValue(pt, data.Value, mType) - - case *view.SumData: - setPointValue(pt, data.Value, mType) - } - - return pt -} - -// Not returning anything from this function because metricspb.Point.is_Value is an unexported -// interface hence we just have to set its value by pointer. 
-func setPointValue(pt *metricspb.Point, value float64, mType measureType) { - if mType == measureInt64 { - pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)} - } else { - pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value} - } -} - -func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket { - distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket)) - for i := 0; i < len(countPerBucket); i++ { - count := countPerBucket[i] - - distBuckets[i] = &metricspb.DistributionValue_Bucket{ - Count: count, - } - } - - return distBuckets -} - -func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue { - if len(tags) == 0 { - return nil - } - - labelValues := make([]*metricspb.LabelValue, 0, len(tags)) - for _, tag_ := range tags { - labelValues = append(labelValues, &metricspb.LabelValue{ - Value: tag_.Value, - - // It is imperative that we set the "HasValue" attribute, - // in order to distinguish missing a label from the empty string. - // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue - // - // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments, - // so the best case that we can use to distinguish missing labels/tags from the - // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have - // a value. - HasValue: tag_.Key.Name() != "", - }) - } - return labelValues -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go deleted file mode 100644 index 68be4c75bd..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -const Version = "0.0.1" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/availabilitysets.go index 47b00f2266..e849b72258 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/availabilitysets.go @@ -103,8 +103,8 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always @@ -180,8 +180,8 @@ func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourc // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always @@ -257,8 +257,8 @@ func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGr // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -333,8 +333,8 @@ func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceG // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -448,8 +448,8 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Cont // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always @@ -466,7 +466,9 @@ func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Resp } // ListBySubscription lists all availability sets in a subscription. -func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (result AvailabilitySetListResultPage, err error) { +// Parameters: +// expand - the expand expression to apply to the operation. 
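[Editor's note] ListBySubscription gains an expand argument; per the preparer change below, a non-empty value is encoded as the $expand query parameter and an empty string leaves the request as before. A hedged caller-side sketch follows; the NewAvailabilitySetsClient constructor and the page's Values() accessor are standard generated helpers assumed here rather than part of this diff, and authorizer setup is omitted.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

func main() {
	// Hypothetical subscription ID; credentials/authorizer wiring is omitted.
	client := compute.NewAvailabilitySetsClient("<subscription-id>")

	// New signature: an empty expand string keeps the old behavior, while a
	// non-empty one is sent as $expand.
	page, err := client.ListBySubscription(context.Background(), "")
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Println("availability sets on first page:", len(page.Values()))
}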
+func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context, expand string) (result AvailabilitySetListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription") defer func() { @@ -478,7 +480,7 @@ func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (re }() } result.fn = client.listBySubscriptionNextResults - req, err := client.ListBySubscriptionPreparer(ctx) + req, err := client.ListBySubscriptionPreparer(ctx, expand) if err != nil { err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", nil, "Failure preparing request") return @@ -500,7 +502,7 @@ func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (re } // ListBySubscriptionPreparer prepares the ListBySubscription request. -func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { +func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } @@ -509,6 +511,9 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -521,8 +526,8 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont // ListBySubscriptionSender sends the ListBySubscription request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always @@ -560,7 +565,7 @@ func (client AvailabilitySetsClient) listBySubscriptionNextResults(ctx context.C } // ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. -func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context) (result AvailabilitySetListResultIterator, err error) { +func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context, expand string) (result AvailabilitySetListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription") defer func() { @@ -571,7 +576,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Cont tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.ListBySubscription(ctx) + result.page, err = client.ListBySubscription(ctx, expand) return } @@ -638,8 +643,8 @@ func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourc // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. 
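[Editor's note] Every Sender in these vendored Azure clients is rewritten to pull its send decorators from the request context via autorest.GetSendDecorators, falling back to the retry-with-registration decorator. A hedged sketch of what that enables on the caller side; it assumes go-autorest's WithSendDecorators helper and DoRetryForStatusCodes decorator, neither of which appears in this diff.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

func main() {
	client := compute.NewAvailabilitySetsClient("<subscription-id>")

	// Stash custom send decorators in the context; GetSendDecorators in the
	// generated Senders will use these instead of the default
	// DoRetryWithRegistration decorator.
	ctx := autorest.WithSendDecorators(context.Background(), []autorest.SendDecorator{
		autorest.DoRetryForStatusCodes(3, 2*time.Second, http.StatusTooManyRequests),
	})

	if _, err := client.ListBySubscription(ctx, ""); err != nil {
		// Without an authorizer this fails at runtime; the point is only how
		// decorators travel through the context into the Sender.
		fmt.Println("expected failure without an authorizer:", err)
	}
}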
func (client AvailabilitySetsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // UpdateResponder handles the response to the Update request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/client.go index e0edcfb124..b23c9ca742 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/client.go @@ -1,4 +1,4 @@ -// Package compute implements the Azure ARM Compute service API version 2017-12-01. +// Package compute implements the Azure ARM Compute service API version . // // Compute Client package compute diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/disks.go new file mode 100644 index 0000000000..3256a59cd4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/disks.go @@ -0,0 +1,776 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DisksClient is the compute Client +type DisksClient struct { + BaseClient +} + +// NewDisksClient creates an instance of the DisksClient client. +func NewDisksClient(subscriptionID string) DisksClient { + return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDisksClientWithBaseURI creates an instance of the DisksClient client. +func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient { + return DisksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a disk. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is +// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// characters. +// disk - disk object supplied in the body of the Put disk operation. 
+func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (result DisksCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: disk, + Constraints: []validation.Constraint{{Target: "disk.DiskProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "disk.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewError("compute.DisksClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskName, disk) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + disk.ManagedBy = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(disk), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a disk. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is +// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// characters. +func (client DisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (result DisksDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
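The Sender methods in this file now build their decorator list with autorest.GetSendDecorators, which returns any send decorators staged on the request's context and otherwise falls back to the defaults passed in (here azure.DoRetryWithRegistration). A minimal sketch of how a caller might stage extra decorators follows; it assumes go-autorest's WithSendDecorators helper and an arbitrary retry policy, and is illustrative only, not part of the vendored change:

```go
// Sketch only: stage additional send decorators on a context so that the
// generated Senders, which now call autorest.GetSendDecorators, will use them.
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

// withExtraRetries returns a context carrying a custom decorator list; when such
// a list is present, GetSendDecorators returns it instead of the defaults.
func withExtraRetries(ctx context.Context) context.Context {
	return autorest.WithSendDecorators(ctx, []autorest.SendDecorator{
		autorest.DoRetryForAttempts(3, 5*time.Second), // arbitrary policy for illustration
	})
}

func main() {
	ctx := withExtraRetries(context.Background())
	// The prepared request carries ctx, so any Sender built on GetSendDecorators
	// (for example DisksClient.GetSender) sees the staged decorators.
	req, err := autorest.Prepare((&http.Request{}).WithContext(ctx),
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com/"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("prepared request for", req.URL)
}
```

Because every generated Preparer attaches the caller's context to the request, the staged decorators reach the Senders without any further wiring.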
+func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DisksClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a disk. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is +// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// characters. +func (client DisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result Disk, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DisksClient) GetResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a disk. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is +// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// characters. +// grantAccessData - access data object supplied in the body of the get disk access operation. +func (client DisksClient) GrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (result DisksGrantAccessFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.GrantAccess") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("compute.DisksClient", "GrantAccess", err.Error()) + } + + req, err := client.GrantAccessPreparer(ctx, resourceGroupName, diskName, grantAccessData) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", nil, "Failure preparing request") + return + } + + result, err = client.GrantAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// GrantAccessPreparer prepares the GrantAccess request. 
+func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGrantAccessFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the disks under a subscription. +func (client DisksClient) List(ctx context.Context) (result DiskListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.List") + defer func() { + sc := -1 + if result.dl.Response.Response != nil { + sc = result.dl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.dl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure sending request") + return + } + + result.dl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DisksClient) ListResponder(resp *http.Response) (result DiskList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client DisksClient) listNextResults(ctx context.Context, lastResults DiskList) (result DiskList, err error) { + req, err := lastResults.diskListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client DisksClient) ListComplete(ctx context.Context) (result DiskListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists all the disks under a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client DisksClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.dl.Response.Response != nil { + sc = result.dl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.dl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.dl, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result DiskList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client DisksClient) listByResourceGroupNextResults(ctx context.Context, lastResults DiskList) (result DiskList, err error) { + req, err := lastResults.diskListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client DisksClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// RevokeAccess revokes access to a disk. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is +// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// characters. +func (client DisksClient) RevokeAccess(ctx context.Context, resourceGroupName string, diskName string) (result DisksRevokeAccessFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.RevokeAccess") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + result, err = client.RevokeAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// RevokeAccessPreparer prepares the RevokeAccess request. 
+func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRevokeAccessFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a disk. +// Parameters: +// resourceGroupName - the name of the resource group. +// diskName - the name of the managed disk that is being created. The name can't be changed after the disk is +// created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 +// characters. +// disk - disk object supplied in the body of the Patch disk operation. +func (client DisksClient) Update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (result DisksUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DisksClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, diskName, disk) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(disk), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client DisksClient) UpdateResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/images.go index 3dfcb1162a..0b4ca5785e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/images.go @@ -97,9 +97,9 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future ImagesCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -174,9 +174,9 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -261,8 +261,8 @@ func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName st // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -335,8 +335,8 @@ func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, err // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -448,8 +448,8 @@ func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, reso // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -559,9 +559,9 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) 
if err != nil { return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/loganalytics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/loganalytics.go index b1b2ef19b3..4ff599d27f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/loganalytics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/loganalytics.go @@ -103,9 +103,9 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context // ExportRequestRateByIntervalSender sends the ExportRequestRateByInterval request. The method will close the // http.Response Body if it receives an error. func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Request) (future LogAnalyticsExportRequestRateByIntervalFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -188,9 +188,9 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con // ExportThrottledRequestsSender sends the ExportThrottledRequests request. The method will close the // http.Response Body if it receives an error. func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request) (future LogAnalyticsExportThrottledRequestsFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/models.go index 6aac0e3dc0..e224b7c120 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/models.go @@ -31,21 +31,36 @@ import ( // The package's fully qualified name. const fqdn = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute" +// AccessLevel enumerates the values for access level. +type AccessLevel string + +const ( + // None ... + None AccessLevel = "None" + // Read ... + Read AccessLevel = "Read" +) + +// PossibleAccessLevelValues returns an array of possible values for the AccessLevel const type. +func PossibleAccessLevelValues() []AccessLevel { + return []AccessLevel{None, Read} +} + // CachingTypes enumerates the values for caching types. type CachingTypes string const ( - // None ... - None CachingTypes = "None" - // ReadOnly ... - ReadOnly CachingTypes = "ReadOnly" - // ReadWrite ... - ReadWrite CachingTypes = "ReadWrite" + // CachingTypesNone ... + CachingTypesNone CachingTypes = "None" + // CachingTypesReadOnly ... + CachingTypesReadOnly CachingTypes = "ReadOnly" + // CachingTypesReadWrite ... + CachingTypesReadWrite CachingTypes = "ReadWrite" ) // PossibleCachingTypesValues returns an array of possible values for the CachingTypes const type. 
func PossibleCachingTypesValues() []CachingTypes { - return []CachingTypes{None, ReadOnly, ReadWrite} + return []CachingTypes{CachingTypesNone, CachingTypesReadOnly, CachingTypesReadWrite} } // ComponentNames enumerates the values for component names. @@ -61,21 +76,42 @@ func PossibleComponentNamesValues() []ComponentNames { return []ComponentNames{MicrosoftWindowsShellSetup} } +// DiskCreateOption enumerates the values for disk create option. +type DiskCreateOption string + +const ( + // Attach ... + Attach DiskCreateOption = "Attach" + // Copy ... + Copy DiskCreateOption = "Copy" + // Empty ... + Empty DiskCreateOption = "Empty" + // FromImage ... + FromImage DiskCreateOption = "FromImage" + // Import ... + Import DiskCreateOption = "Import" +) + +// PossibleDiskCreateOptionValues returns an array of possible values for the DiskCreateOption const type. +func PossibleDiskCreateOptionValues() []DiskCreateOption { + return []DiskCreateOption{Attach, Copy, Empty, FromImage, Import} +} + // DiskCreateOptionTypes enumerates the values for disk create option types. type DiskCreateOptionTypes string const ( - // Attach ... - Attach DiskCreateOptionTypes = "Attach" - // Empty ... - Empty DiskCreateOptionTypes = "Empty" - // FromImage ... - FromImage DiskCreateOptionTypes = "FromImage" + // DiskCreateOptionTypesAttach ... + DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach" + // DiskCreateOptionTypesEmpty ... + DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty" + // DiskCreateOptionTypesFromImage ... + DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage" ) // PossibleDiskCreateOptionTypesValues returns an array of possible values for the DiskCreateOptionTypes const type. func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { - return []DiskCreateOptionTypes{Attach, Empty, FromImage} + return []DiskCreateOptionTypes{DiskCreateOptionTypesAttach, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage} } // InstanceViewTypes enumerates the values for instance view types. @@ -221,6 +257,53 @@ func PossibleResourceIdentityTypeValues() []ResourceIdentityType { return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned, ResourceIdentityTypeSystemAssignedUserAssigned, ResourceIdentityTypeUserAssigned} } +// ResourceSkuCapacityScaleType enumerates the values for resource sku capacity scale type. +type ResourceSkuCapacityScaleType string + +const ( + // ResourceSkuCapacityScaleTypeAutomatic ... + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic" + // ResourceSkuCapacityScaleTypeManual ... + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual" + // ResourceSkuCapacityScaleTypeNone ... + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None" +) + +// PossibleResourceSkuCapacityScaleTypeValues returns an array of possible values for the ResourceSkuCapacityScaleType const type. +func PossibleResourceSkuCapacityScaleTypeValues() []ResourceSkuCapacityScaleType { + return []ResourceSkuCapacityScaleType{ResourceSkuCapacityScaleTypeAutomatic, ResourceSkuCapacityScaleTypeManual, ResourceSkuCapacityScaleTypeNone} +} + +// ResourceSkuRestrictionsReasonCode enumerates the values for resource sku restrictions reason code. +type ResourceSkuRestrictionsReasonCode string + +const ( + // NotAvailableForSubscription ... + NotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + // QuotaID ... 
+ QuotaID ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +// PossibleResourceSkuRestrictionsReasonCodeValues returns an array of possible values for the ResourceSkuRestrictionsReasonCode const type. +func PossibleResourceSkuRestrictionsReasonCodeValues() []ResourceSkuRestrictionsReasonCode { + return []ResourceSkuRestrictionsReasonCode{NotAvailableForSubscription, QuotaID} +} + +// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type. +type ResourceSkuRestrictionsType string + +const ( + // Location ... + Location ResourceSkuRestrictionsType = "Location" + // Zone ... + Zone ResourceSkuRestrictionsType = "Zone" +) + +// PossibleResourceSkuRestrictionsTypeValues returns an array of possible values for the ResourceSkuRestrictionsType const type. +func PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType { + return []ResourceSkuRestrictionsType{Location, Zone} +} + // RollingUpgradeActionType enumerates the values for rolling upgrade action type. type RollingUpgradeActionType string @@ -743,6 +826,91 @@ func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes { return []VirtualMachineSizeTypes{BasicA0, BasicA1, BasicA2, BasicA3, BasicA4, StandardA0, StandardA1, StandardA10, StandardA11, StandardA1V2, StandardA2, StandardA2mV2, StandardA2V2, StandardA3, StandardA4, StandardA4mV2, StandardA4V2, StandardA5, StandardA6, StandardA7, StandardA8, StandardA8mV2, StandardA8V2, StandardA9, StandardB1ms, StandardB1s, StandardB2ms, StandardB2s, StandardB4ms, StandardB8ms, StandardD1, StandardD11, StandardD11V2, StandardD12, StandardD12V2, StandardD13, StandardD13V2, StandardD14, StandardD14V2, StandardD15V2, StandardD16sV3, StandardD16V3, StandardD1V2, StandardD2, StandardD2sV3, StandardD2V2, StandardD2V3, StandardD3, StandardD32sV3, StandardD32V3, StandardD3V2, StandardD4, StandardD4sV3, StandardD4V2, StandardD4V3, StandardD5V2, StandardD64sV3, StandardD64V3, StandardD8sV3, StandardD8V3, StandardDS1, StandardDS11, StandardDS11V2, StandardDS12, StandardDS12V2, StandardDS13, StandardDS132V2, StandardDS134V2, StandardDS13V2, StandardDS14, StandardDS144V2, StandardDS148V2, StandardDS14V2, StandardDS15V2, StandardDS1V2, StandardDS2, StandardDS2V2, StandardDS3, StandardDS3V2, StandardDS4, StandardDS4V2, StandardDS5V2, StandardE16sV3, StandardE16V3, StandardE2sV3, StandardE2V3, StandardE3216V3, StandardE328sV3, StandardE32sV3, StandardE32V3, StandardE4sV3, StandardE4V3, StandardE6416sV3, StandardE6432sV3, StandardE64sV3, StandardE64V3, StandardE8sV3, StandardE8V3, StandardF1, StandardF16, StandardF16s, StandardF16sV2, StandardF1s, StandardF2, StandardF2s, StandardF2sV2, StandardF32sV2, StandardF4, StandardF4s, StandardF4sV2, StandardF64sV2, StandardF72sV2, StandardF8, StandardF8s, StandardF8sV2, StandardG1, StandardG2, StandardG3, StandardG4, StandardG5, StandardGS1, StandardGS2, StandardGS3, StandardGS4, StandardGS44, StandardGS48, StandardGS5, StandardGS516, StandardGS58, StandardH16, StandardH16m, StandardH16mr, StandardH16r, StandardH8, StandardH8m, StandardL16s, StandardL32s, StandardL4s, StandardL8s, StandardM12832ms, StandardM12864ms, StandardM128ms, StandardM128s, StandardM6416ms, StandardM6432ms, StandardM64ms, StandardM64s, StandardNC12, StandardNC12sV2, StandardNC12sV3, StandardNC24, StandardNC24r, StandardNC24rsV2, StandardNC24rsV3, StandardNC24sV2, StandardNC24sV3, StandardNC6, StandardNC6sV2, StandardNC6sV3, StandardND12s, StandardND24rs, StandardND24s, StandardND6s, StandardNV12, StandardNV24, StandardNV6} } +// AccessURI a 
disk access SAS uri. +type AccessURI struct { + autorest.Response `json:"-"` + // AccessURIOutput - Operation output data (raw JSON) + *AccessURIOutput `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccessURI. +func (au AccessURI) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if au.AccessURIOutput != nil { + objectMap["properties"] = au.AccessURIOutput + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccessURI struct. +func (au *AccessURI) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var accessURIOutput AccessURIOutput + err = json.Unmarshal(*v, &accessURIOutput) + if err != nil { + return err + } + au.AccessURIOutput = &accessURIOutput + } + } + } + + return nil +} + +// AccessURIOutput azure properties, including output. +type AccessURIOutput struct { + // AccessURIRaw - Operation output data (raw JSON) + *AccessURIRaw `json:"output,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccessURIOutput. +func (auo AccessURIOutput) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if auo.AccessURIRaw != nil { + objectMap["output"] = auo.AccessURIRaw + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccessURIOutput struct. +func (auo *AccessURIOutput) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "output": + if v != nil { + var accessURIRaw AccessURIRaw + err = json.Unmarshal(*v, &accessURIRaw) + if err != nil { + return err + } + auo.AccessURIRaw = &accessURIRaw + } + } + } + + return nil +} + +// AccessURIRaw this object gets 'bubbled up' through flattening. +type AccessURIRaw struct { + // AccessSAS - READ-ONLY; A SAS uri for accessing a disk. + AccessSAS *string `json:"accessSAS,omitempty"` +} + // AdditionalUnattendContent specifies additional XML formatted information that can be included in the // Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name, // and the pass in which the content is applied. @@ -1159,6 +1327,20 @@ type BootDiagnosticsInstanceView struct { SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` } +// CreationData data used when creating a disk. +type CreationData struct { + // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy' + CreateOption DiskCreateOption `json:"createOption,omitempty"` + // StorageAccountID - If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription + StorageAccountID *string `json:"storageAccountId,omitempty"` + // ImageReference - Disk source information. + ImageReference *ImageDiskReference `json:"imageReference,omitempty"` + // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. + SourceURI *string `json:"sourceUri,omitempty"` + // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. 
+ SourceResourceID *string `json:"sourceResourceId,omitempty"` +} + +// DataDisk describes a data disk. +type DataDisk struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. @@ -1169,11 +1351,11 @@ type DataDisk struct { Vhd *VirtualHardDisk `json:"vhd,omitempty"` // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. Image *VirtualHardDisk `json:"image,omitempty"`
- // Caching - Specifies the caching requirements. <br><br> Possible values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite** <br><br> Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
+ // Caching - Specifies the caching requirements. <br><br> Possible values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite** <br><br> Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' Caching CachingTypes `json:"caching,omitempty"` // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
- // CreateOption - Specifies how the virtual machine should be created. <br><br> Possible values are: <br><br> **Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine. <br><br> **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'FromImage', 'Empty', 'Attach'
+ // CreateOption - Specifies how the virtual machine should be created. <br><br> Possible values are: <br><br> **Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine. <br><br> **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. <br><br>
This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` @@ -1194,6 +1376,144 @@ type DiagnosticsProfile struct { BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` } +// Disk disk resource. +type Disk struct { + autorest.Response `json:"-"` + // ManagedBy - READ-ONLY; A relative URI containing the ID of the VM that has the disk attached. + ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + // Zones - The Logical zone list for Disk. + Zones *[]string `json:"zones,omitempty"` + *DiskProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Disk. +func (d Disk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if d.Sku != nil { + objectMap["sku"] = d.Sku + } + if d.Zones != nil { + objectMap["zones"] = d.Zones + } + if d.DiskProperties != nil { + objectMap["properties"] = d.DiskProperties + } + if d.Location != nil { + objectMap["location"] = d.Location + } + if d.Tags != nil { + objectMap["tags"] = d.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Disk struct. +func (d *Disk) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + d.ManagedBy = &managedBy + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + d.Sku = &sku + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + d.Zones = &zones + } + case "properties": + if v != nil { + var diskProperties DiskProperties + err = json.Unmarshal(*v, &diskProperties) + if err != nil { + return err + } + d.DiskProperties = &diskProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + d.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + d.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + d.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + d.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + d.Tags = tags + } + } + } + + return nil +} + // DiskEncryptionSettings describes a Encryption Settings for a Disk type DiskEncryptionSettings struct { // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. @@ -1214,6 +1534,412 @@ type DiskInstanceView struct { Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` } +// DiskList the List Disks operation response. 
+type DiskList struct { + autorest.Response `json:"-"` + // Value - A list of disks. + Value *[]Disk `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskListIterator provides access to a complete listing of Disk values. +type DiskListIterator struct { + i int + page DiskListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DiskListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskListIterator) Response() DiskList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskListIterator) Value() Disk { + if !iter.page.NotDone() { + return Disk{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DiskListIterator type. +func NewDiskListIterator(page DiskListPage) DiskListIterator { + return DiskListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dl DiskList) IsEmpty() bool { + return dl.Value == nil || len(*dl.Value) == 0 +} + +// diskListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dl DiskList) diskListPreparer(ctx context.Context) (*http.Request, error) { + if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dl.NextLink))) +} + +// DiskListPage contains a page of Disk values. +type DiskListPage struct { + fn func(context.Context, DiskList) (DiskList, error) + dl DiskList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
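DiskList, DiskListPage, and DiskListIterator form the SDK's usual paging triple: the list result carries a nextLink, the page type fetches successive pages, and the iterator flattens pages into individual Disk values. A rough usage sketch, assuming an authorized compute.DisksClient; this is illustrative and not part of the diff:

```go
// Sketch only: walk every managed disk in a subscription using the paging
// types added here. Assumes `client` is an authorized compute.DisksClient.
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

func listAllDiskNames(ctx context.Context, client compute.DisksClient) ([]string, error) {
	var names []string
	// ListComplete returns a DiskListIterator positioned on the first page.
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return nil, err
	}
	for ; iter.NotDone(); err = iter.NextWithContext(ctx) {
		if err != nil {
			return nil, err // NextWithContext failed while crossing a page boundary
		}
		if d := iter.Value(); d.Name != nil {
			names = append(names, *d.Name)
		}
	}
	return names, nil
}
```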
+func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiskListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.dl) + if err != nil { + return err + } + page.dl = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskListPage) NotDone() bool { + return !page.dl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskListPage) Response() DiskList { + return page.dl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskListPage) Values() []Disk { + if page.dl.IsEmpty() { + return nil + } + return *page.dl.Value +} + +// Creates a new instance of the DiskListPage type. +func NewDiskListPage(getNextPage func(context.Context, DiskList) (DiskList, error)) DiskListPage { + return DiskListPage{fn: getNextPage} +} + +// DiskProperties disk resource properties. +type DiskProperties struct { + // TimeCreated - READ-ONLY; The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + // ProvisioningState - READ-ONLY; The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.CreateOrUpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksDeleteFuture) Result(client DisksClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if osr.Response.Response, err = future.GetResult(sender); err == nil && osr.Response.Response.StatusCode != http.StatusNoContent { + osr, err = client.DeleteResponder(osr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", osr.Response.Response, "Failure responding to request") + } + } + return +} + +// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksGrantAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskSku the disks and snapshots sku name. Can be Standard_LRS or Premium_LRS. 
+type DiskSku struct { + // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS' + Name StorageAccountTypes `json:"name,omitempty"` + // Tier - READ-ONLY; The sku tier. + Tier *string `json:"tier,omitempty"` +} + +// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DisksRevokeAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksRevokeAccessFuture) Result(client DisksClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if osr.Response.Response, err = future.GetResult(sender); err == nil && osr.Response.Response.StatusCode != http.StatusNoContent { + osr, err = client.RevokeAccessResponder(osr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", osr.Response.Response, "Failure responding to request") + } + } + return +} + +// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.UpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskUpdate disk update resource. +type DiskUpdate struct { + *DiskUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskUpdate. +func (du DiskUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if du.DiskUpdateProperties != nil { + objectMap["properties"] = du.DiskUpdateProperties + } + if du.Tags != nil { + objectMap["tags"] = du.Tags + } + if du.Sku != nil { + objectMap["sku"] = du.Sku + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. 
+func (du *DiskUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskUpdateProperties DiskUpdateProperties + err = json.Unmarshal(*v, &diskUpdateProperties) + if err != nil { + return err + } + du.DiskUpdateProperties = &diskUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + du.Tags = tags + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + du.Sku = &sku + } + } + } + + return nil +} + +// DiskUpdateProperties disk resource update properties. +type DiskUpdateProperties struct { + // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` +} + +// EncryptionSettings encryption settings for disk or snapshot +type EncryptionSettings struct { + // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. + Enabled *bool `json:"enabled,omitempty"` + // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// GrantAccessData data used for requesting a SAS. +type GrantAccessData struct { + // Access - Possible values include: 'None', 'Read' + Access AccessLevel `json:"access,omitempty"` + // DurationInSeconds - Time duration in seconds until the SAS access expires. + DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` +} + // HardwareProfile specifies the hardware settings for the virtual machine. type HardwareProfile struct { // VMSize - Specifies the size of the virtual machine. For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

The available VM sizes depend on region and availability set. For a list of available sizes use these APIs:

[List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)

[List all available virtual machine sizes in a region](https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list)

[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). Possible values include: 'BasicA0', 'BasicA1', 'BasicA2', 'BasicA3', 'BasicA4', 'StandardA0', 'StandardA1', 'StandardA2', 'StandardA3', 'StandardA4', 'StandardA5', 'StandardA6', 'StandardA7', 'StandardA8', 'StandardA9', 'StandardA10', 'StandardA11', 'StandardA1V2', 'StandardA2V2', 'StandardA4V2', 'StandardA8V2', 'StandardA2mV2', 'StandardA4mV2', 'StandardA8mV2', 'StandardB1s', 'StandardB1ms', 'StandardB2s', 'StandardB2ms', 'StandardB4ms', 'StandardB8ms', 'StandardD1', 'StandardD2', 'StandardD3', 'StandardD4', 'StandardD11', 'StandardD12', 'StandardD13', 'StandardD14', 'StandardD1V2', 'StandardD2V2', 'StandardD3V2', 'StandardD4V2', 'StandardD5V2', 'StandardD2V3', 'StandardD4V3', 'StandardD8V3', 'StandardD16V3', 'StandardD32V3', 'StandardD64V3', 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardD11V2', 'StandardD12V2', 'StandardD13V2', 'StandardD14V2', 'StandardD15V2', 'StandardDS1', 'StandardDS2', 'StandardDS3', 'StandardDS4', 'StandardDS11', 'StandardDS12', 'StandardDS13', 'StandardDS14', 'StandardDS1V2', 'StandardDS2V2', 'StandardDS3V2', 'StandardDS4V2', 'StandardDS5V2', 'StandardDS11V2', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardDS134V2', 'StandardDS132V2', 'StandardDS148V2', 'StandardDS144V2', 'StandardE2V3', 'StandardE4V3', 'StandardE8V3', 'StandardE16V3', 'StandardE32V3', 'StandardE64V3', 'StandardE2sV3', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardE3216V3', 'StandardE328sV3', 'StandardE6432sV3', 'StandardE6416sV3', 'StandardF1', 'StandardF2', 'StandardF4', 'StandardF8', 'StandardF16', 'StandardF1s', 'StandardF2s', 'StandardF4s', 'StandardF8s', 'StandardF16s', 'StandardF2sV2', 'StandardF4sV2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 'StandardF72sV2', 'StandardG1', 'StandardG2', 'StandardG3', 'StandardG4', 'StandardG5', 'StandardGS1', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardGS48', 'StandardGS44', 'StandardGS516', 'StandardGS58', 'StandardH8', 'StandardH16', 'StandardH8m', 'StandardH16m', 'StandardH16r', 'StandardH16mr', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s', 'StandardM64s', 'StandardM64ms', 'StandardM128s', 'StandardM128ms', 'StandardM6432ms', 'StandardM6416ms', 'StandardM12864ms', 'StandardM12832ms', 'StandardNC6', 'StandardNC12', 'StandardNC24', 'StandardNC24r', 'StandardNC6sV2', 'StandardNC12sV2', 'StandardNC24sV2', 'StandardNC24rsV2', 'StandardNC6sV3', 'StandardNC12sV3', 'StandardNC24sV3', 'StandardNC24rsV3', 'StandardND6s', 'StandardND12s', 'StandardND24s', 'StandardND24rs', 'StandardNV6', 'StandardNV12', 'StandardNV24' @@ -1332,7 +2058,7 @@ type ImageDataDisk struct { ManagedDisk *SubResource `json:"managedDisk,omitempty"` // BlobURI - The Virtual Hard Disk. BlobURI *string `json:"blobUri,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite' + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' Caching CachingTypes `json:"caching,omitempty"` // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` @@ -1340,6 +2066,14 @@ type ImageDataDisk struct { StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` } +// ImageDiskReference the source image used for creating the disk. +type ImageDiskReference struct { + // ID - A relative uri containing either a Platform Image Repository or user image reference. + ID *string `json:"id,omitempty"` + // Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. + Lun *int32 `json:"lun,omitempty"` +} + // ImageListResult the List Image operation response. type ImageListResult struct { autorest.Response `json:"-"` @@ -1498,7 +2232,7 @@ type ImageOSDisk struct { ManagedDisk *SubResource `json:"managedDisk,omitempty"` // BlobURI - The Virtual Hard Disk. BlobURI *string `json:"blobUri,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite' + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' Caching CachingTypes `json:"caching,omitempty"` // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` @@ -1702,6 +2436,23 @@ type InstanceViewStatus struct { Time *date.Time `json:"time,omitempty"` } +// KeyVaultAndKeyReference key Vault Key Url and vault id of KeK, KeK is optional and when provided is used +// to unwrap the encryptionKey +type KeyVaultAndKeyReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // KeyURL - Url pointing to a key or secret in KeyVault + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndSecretReference key Vault Secret Url and vault id of the encryption key +type KeyVaultAndSecretReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // SecretURL - Url pointing to a key or secret in KeyVault + SecretURL *string `json:"secretUrl,omitempty"` +} + // KeyVaultKeyReference describes a reference to Key Vault Key type KeyVaultKeyReference struct { // KeyURL - The URL referencing a key encryption key in Key Vault. @@ -2189,11 +2940,11 @@ type OSDisk struct { Vhd *VirtualHardDisk `json:"vhd,omitempty"` // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. Image *VirtualHardDisk `json:"image,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite' + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' Caching CachingTypes `json:"caching,omitempty"` // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'FromImage', 'Empty', 'Attach' + // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` @@ -2304,6 +3055,259 @@ func (r Resource) MarshalJSON() ([]byte, error) { return json.Marshal(objectMap) } +// ResourceSku describes an available Compute SKU. +type ResourceSku struct { + // ResourceType - READ-ONLY; The type of resource the SKU applies to. + ResourceType *string `json:"resourceType,omitempty"` + // Name - READ-ONLY; The name of SKU. + Name *string `json:"name,omitempty"` + // Tier - READ-ONLY; Specifies the tier of virtual machines in a scale set.

Possible Values:

**Standard**

**Basic** + Tier *string `json:"tier,omitempty"` + // Size - READ-ONLY; The Size of the SKU. + Size *string `json:"size,omitempty"` + // Family - READ-ONLY; The Family of this particular SKU. + Family *string `json:"family,omitempty"` + // Kind - READ-ONLY; The Kind of resources that are supported in this SKU. + Kind *string `json:"kind,omitempty"` + // Capacity - READ-ONLY; Specifies the number of virtual machines in the scale set. + Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` + // Locations - READ-ONLY; The set of locations that the SKU is available. + Locations *[]string `json:"locations,omitempty"` + // LocationInfo - READ-ONLY; A list of locations and availability zones in those locations where the SKU is available. + LocationInfo *[]ResourceSkuLocationInfo `json:"locationInfo,omitempty"` + // APIVersions - READ-ONLY; The api versions that support this SKU. + APIVersions *[]string `json:"apiVersions,omitempty"` + // Costs - READ-ONLY; Metadata for retrieving price info. + Costs *[]ResourceSkuCosts `json:"costs,omitempty"` + // Capabilities - READ-ONLY; A name value pair to describe the capability. + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` + // Restrictions - READ-ONLY; The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` +} + +// ResourceSkuCapabilities describes The SKU capabilities object. +type ResourceSkuCapabilities struct { + // Name - READ-ONLY; An invariant to describe the feature. + Name *string `json:"name,omitempty"` + // Value - READ-ONLY; An invariant if the feature is measured by quantity. + Value *string `json:"value,omitempty"` +} + +// ResourceSkuCapacity describes scaling information of a SKU. +type ResourceSkuCapacity struct { + // Minimum - READ-ONLY; The minimum capacity. + Minimum *int64 `json:"minimum,omitempty"` + // Maximum - READ-ONLY; The maximum capacity that can be set. + Maximum *int64 `json:"maximum,omitempty"` + // Default - READ-ONLY; The default capacity. + Default *int64 `json:"default,omitempty"` + // ScaleType - READ-ONLY; The scale type applicable to the sku. Possible values include: 'ResourceSkuCapacityScaleTypeAutomatic', 'ResourceSkuCapacityScaleTypeManual', 'ResourceSkuCapacityScaleTypeNone' + ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` +} + +// ResourceSkuCosts describes metadata for retrieving price info. +type ResourceSkuCosts struct { + // MeterID - READ-ONLY; Used for querying price from commerce. + MeterID *string `json:"meterID,omitempty"` + // Quantity - READ-ONLY; The multiplier is needed to extend the base metered cost. + Quantity *int64 `json:"quantity,omitempty"` + // ExtendedUnit - READ-ONLY; An invariant to show the extended unit. + ExtendedUnit *string `json:"extendedUnit,omitempty"` +} + +// ResourceSkuLocationInfo ... +type ResourceSkuLocationInfo struct { + // Location - READ-ONLY; Location of the SKU + Location *string `json:"location,omitempty"` + // Zones - READ-ONLY; List of availability zones where the SKU is supported. + Zones *[]string `json:"zones,omitempty"` +} + +// ResourceSkuRestrictionInfo ... +type ResourceSkuRestrictionInfo struct { + // Locations - READ-ONLY; Locations where the SKU is restricted + Locations *[]string `json:"locations,omitempty"` + // Zones - READ-ONLY; List of availability zones where the SKU is restricted. 
+ Zones *[]string `json:"zones,omitempty"` +} + +// ResourceSkuRestrictions describes scaling information of a SKU. +type ResourceSkuRestrictions struct { + // Type - READ-ONLY; The type of restrictions. Possible values include: 'Location', 'Zone' + Type ResourceSkuRestrictionsType `json:"type,omitempty"` + // Values - READ-ONLY; The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // RestrictionInfo - READ-ONLY; The information about the restriction where the SKU cannot be used. + RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"` + // ReasonCode - READ-ONLY; The reason for restriction. Possible values include: 'QuotaID', 'NotAvailableForSubscription' + ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` +} + +// ResourceSkusResult the List Resource Skus operation response. +type ResourceSkusResult struct { + autorest.Response `json:"-"` + // Value - The list of skus available for the subscription. + Value *[]ResourceSku `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of Resource Skus. Call ListNext() with this URI to fetch the next page of Resource Skus + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceSkusResultIterator provides access to a complete listing of ResourceSku values. +type ResourceSkusResultIterator struct { + i int + page ResourceSkusResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ResourceSkusResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ResourceSkusResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ResourceSkusResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ResourceSkusResultIterator) Response() ResourceSkusResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ResourceSkusResultIterator) Value() ResourceSku { + if !iter.page.NotDone() { + return ResourceSku{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ResourceSkusResultIterator type. +func NewResourceSkusResultIterator(page ResourceSkusResultPage) ResourceSkusResultIterator { + return ResourceSkusResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (rsr ResourceSkusResult) IsEmpty() bool { + return rsr.Value == nil || len(*rsr.Value) == 0 +} + +// resourceSkusResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rsr ResourceSkusResult) resourceSkusResultPreparer(ctx context.Context) (*http.Request, error) { + if rsr.NextLink == nil || len(to.String(rsr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rsr.NextLink))) +} + +// ResourceSkusResultPage contains a page of ResourceSku values. +type ResourceSkusResultPage struct { + fn func(context.Context, ResourceSkusResult) (ResourceSkusResult, error) + rsr ResourceSkusResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ResourceSkusResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rsr) + if err != nil { + return err + } + page.rsr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ResourceSkusResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ResourceSkusResultPage) NotDone() bool { + return !page.rsr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ResourceSkusResultPage) Response() ResourceSkusResult { + return page.rsr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ResourceSkusResultPage) Values() []ResourceSku { + if page.rsr.IsEmpty() { + return nil + } + return *page.rsr.Value +} + +// Creates a new instance of the ResourceSkusResultPage type. +func NewResourceSkusResultPage(getNextPage func(context.Context, ResourceSkusResult) (ResourceSkusResult, error)) ResourceSkusResultPage { + return ResourceSkusResultPage{fn: getNextPage} +} + +// ResourceUpdate the Resource model definition. +type ResourceUpdate struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceUpdate. +func (ru ResourceUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ru.Tags != nil { + objectMap["tags"] = ru.Tags + } + if ru.Sku != nil { + objectMap["sku"] = ru.Sku + } + return json.Marshal(objectMap) +} + // RollbackStatusInfo information about rollback on failed VM instances after a OS Upgrade operation. type RollbackStatusInfo struct { // SuccessfullyRolledbackInstanceCount - READ-ONLY; The number of instances which have been successfully rolled back. @@ -2782,6 +3786,493 @@ type Sku struct { Capacity *int64 `json:"capacity,omitempty"` } +// Snapshot snapshot resource. +type Snapshot struct { + autorest.Response `json:"-"` + // ManagedBy - READ-ONLY; Unused. Always Null. 
+ ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + *DiskProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Snapshot. +func (s Snapshot) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if s.Sku != nil { + objectMap["sku"] = s.Sku + } + if s.DiskProperties != nil { + objectMap["properties"] = s.DiskProperties + } + if s.Location != nil { + objectMap["location"] = s.Location + } + if s.Tags != nil { + objectMap["tags"] = s.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Snapshot struct. +func (s *Snapshot) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + s.ManagedBy = &managedBy + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + s.Sku = &sku + } + case "properties": + if v != nil { + var diskProperties DiskProperties + err = json.Unmarshal(*v, &diskProperties) + if err != nil { + return err + } + s.DiskProperties = &diskProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + s.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + s.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + s.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + s.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + s.Tags = tags + } + } + } + + return nil +} + +// SnapshotList the List Snapshots operation response. +type SnapshotList struct { + autorest.Response `json:"-"` + // Value - A list of snapshots. + Value *[]Snapshot `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots. + NextLink *string `json:"nextLink,omitempty"` +} + +// SnapshotListIterator provides access to a complete listing of Snapshot values. +type SnapshotListIterator struct { + i int + page SnapshotListPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *SnapshotListIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotListIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SnapshotListIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SnapshotListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SnapshotListIterator) Response() SnapshotList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SnapshotListIterator) Value() Snapshot { + if !iter.page.NotDone() { + return Snapshot{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SnapshotListIterator type. +func NewSnapshotListIterator(page SnapshotListPage) SnapshotListIterator { + return SnapshotListIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (sl SnapshotList) IsEmpty() bool { + return sl.Value == nil || len(*sl.Value) == 0 +} + +// snapshotListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sl SnapshotList) snapshotListPreparer(ctx context.Context) (*http.Request, error) { + if sl.NextLink == nil || len(to.String(sl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sl.NextLink))) +} + +// SnapshotListPage contains a page of Snapshot values. +type SnapshotListPage struct { + fn func(context.Context, SnapshotList) (SnapshotList, error) + sl SnapshotList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SnapshotListPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotListPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.sl) + if err != nil { + return err + } + page.sl = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SnapshotListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
+func (page SnapshotListPage) NotDone() bool { + return !page.sl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SnapshotListPage) Response() SnapshotList { + return page.sl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SnapshotListPage) Values() []Snapshot { + if page.sl.IsEmpty() { + return nil + } + return *page.sl.Value +} + +// Creates a new instance of the SnapshotListPage type. +func NewSnapshotListPage(getNextPage func(context.Context, SnapshotList) (SnapshotList, error)) SnapshotListPage { + return SnapshotListPage{fn: getNextPage} +} + +// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SnapshotsCreateOrUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.CreateOrUpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SnapshotsDeleteFuture) Result(client SnapshotsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if osr.Response.Response, err = future.GetResult(sender); err == nil && osr.Response.Response.StatusCode != http.StatusNoContent { + osr, err = client.DeleteResponder(osr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", osr.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsGrantAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *SnapshotsGrantAccessFuture) Result(client SnapshotsClient) (au AccessURI, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotsRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsRevokeAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SnapshotsRevokeAccessFuture) Result(client SnapshotsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if osr.Response.Response, err = future.GetResult(sender); err == nil && osr.Response.Response.StatusCode != http.StatusNoContent { + osr, err = client.RevokeAccessResponder(osr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", osr.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *SnapshotsUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.UpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotUpdate snapshot update resource. +type SnapshotUpdate struct { + *DiskUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for SnapshotUpdate. +func (su SnapshotUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if su.DiskUpdateProperties != nil { + objectMap["properties"] = su.DiskUpdateProperties + } + if su.Tags != nil { + objectMap["tags"] = su.Tags + } + if su.Sku != nil { + objectMap["sku"] = su.Sku + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SnapshotUpdate struct. +func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskUpdateProperties DiskUpdateProperties + err = json.Unmarshal(*v, &diskUpdateProperties) + if err != nil { + return err + } + su.DiskUpdateProperties = &diskUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + su.Tags = tags + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + su.Sku = &sku + } + } + } + + return nil +} + +// SourceVault the vault id is an Azure Resource Manager Resource id in the form +// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} +type SourceVault struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + // SSHConfiguration SSH configuration for Linux based VMs running on Azure type SSHConfiguration struct { // PublicKeys - The list of SSH public keys used to authenticate with linux based VMs. @@ -4109,11 +5600,11 @@ type VirtualMachineScaleSetDataDisk struct { Name *string `json:"name,omitempty"` // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. Lun *int32 `json:"lun,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite' + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' Caching CachingTypes `json:"caching,omitempty"` // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - The create option. Possible values include: 'FromImage', 'Empty', 'Attach' + // CreateOption - The create option. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` @@ -5232,11 +6723,11 @@ type VirtualMachineScaleSetNetworkProfile struct { type VirtualMachineScaleSetOSDisk struct { // Name - The disk name. Name *string `json:"name,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite' + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' Caching CachingTypes `json:"caching,omitempty"` // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - Specifies how the virtual machines in the scale set should be created.

The only allowed value is: **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'FromImage', 'Empty', 'Attach' + // CreateOption - Specifies how the virtual machines in the scale set should be created.

The only allowed value is: **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

Possible values are:

**Windows**

**Linux**. Possible values include: 'Windows', 'Linux' OsType OperatingSystemTypes `json:"osType,omitempty"` @@ -6096,7 +7587,7 @@ type VirtualMachineScaleSetUpdateNetworkProfile struct { // VirtualMachineScaleSetUpdateOSDisk describes virtual machine scale set operating system disk Update // Object. This should be used for Updating VMSS OS Disk. type VirtualMachineScaleSetUpdateOSDisk struct { - // Caching - The caching type. Possible values include: 'None', 'ReadOnly', 'ReadWrite' + // Caching - The caching type. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' Caching CachingTypes `json:"caching,omitempty"` // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/operations.go index 153c0b0faf..bd3fd1a205 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/operations.go @@ -91,8 +91,8 @@ func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/resourceskus.go new file mode 100644 index 0000000000..15b2a399d4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/resourceskus.go @@ -0,0 +1,151 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ResourceSkusClient is the compute Client +type ResourceSkusClient struct { + BaseClient +} + +// NewResourceSkusClient creates an instance of the ResourceSkusClient client. +func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { + return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client. +func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { + return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets the list of Microsoft.Compute SKUs available for your Subscription. +func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") + defer func() { + sc := -1 + if result.rsr.Response.Response != nil { + sc = result.rsr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rsr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending request") + return + } + + result.rsr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ResourceSkusClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client ResourceSkusClient) listNextResults(ctx context.Context, lastResults ResourceSkusResult) (result ResourceSkusResult, err error) { + req, err := lastResults.resourceSkusResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ResourceSkusClient) ListComplete(ctx context.Context) (result ResourceSkusResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/snapshots.go new file mode 100644 index 0000000000..9f69ae2ddb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/snapshots.go @@ -0,0 +1,770 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SnapshotsClient is the compute Client +type SnapshotsClient struct { + BaseClient +} + +// NewSnapshotsClient creates an instance of the SnapshotsClient client. +func NewSnapshotsClient(subscriptionID string) SnapshotsClient { + return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client. +func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient { + return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a snapshot. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot
+// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters.
+// snapshot - snapshot object supplied in the body of the Put disk operation.
+func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (result SnapshotsCreateOrUpdateFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.CreateOrUpdate")
+		defer func() {
+			sc := -1
+			if result.Response() != nil {
+				sc = result.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: snapshot,
+			Constraints: []validation.Constraint{{Target: "snapshot.DiskProperties", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData", Name: validation.Null, Rule: true,
+					Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false,
+						Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
+					}},
+					{Target: "snapshot.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false,
+						Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
+							Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
+								{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
+							}},
+							{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
+								Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
+									{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
+								}},
+						}},
+				}}}}}); err != nil {
+		return result, validation.NewError("compute.SnapshotsClient", "CreateOrUpdate", err.Error())
+	}
+
+	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.CreateOrUpdateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + snapshot.ManagedBy = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future SnapshotsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a snapshot. +// Parameters: +// resourceGroupName - the name of the resource group. +// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot +// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a snapshot. +// Parameters: +// resourceGroupName - the name of the resource group. +// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot +// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +func (client SnapshotsClient) Get(ctx context.Context, resourceGroupName string, snapshotName string) (result Snapshot, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a snapshot. +// Parameters: +// resourceGroupName - the name of the resource group. +// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot +// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// grantAccessData - access data object supplied in the body of the get snapshot access operation. +func (client SnapshotsClient) GrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (result SnapshotsGrantAccessFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.GrantAccess") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("compute.SnapshotsClient", "GrantAccess", err.Error()) + } + + req, err := client.GrantAccessPreparer(ctx, resourceGroupName, snapshotName, grantAccessData) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", nil, "Failure preparing request") + return + } + + result, err = client.GrantAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// GrantAccessPreparer prepares the GrantAccess request. 
+func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future SnapshotsGrantAccessFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists snapshots under a subscription. +func (client SnapshotsClient) List(ctx context.Context) (result SnapshotListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.List") + defer func() { + sc := -1 + if result.sl.Response.Response != nil { + sc = result.sl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.sl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure sending request") + return + } + + result.sl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SnapshotsClient) listNextResults(ctx context.Context, lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.snapshotListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SnapshotsClient) ListComplete(ctx context.Context) (result SnapshotListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists snapshots under a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client SnapshotsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result SnapshotListPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.sl.Response.Response != nil { + sc = result.sl.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.sl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.sl, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client SnapshotsClient) listByResourceGroupNextResults(ctx context.Context, lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.snapshotListPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client SnapshotsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result SnapshotListIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// RevokeAccess revokes access to a snapshot. +// Parameters: +// resourceGroupName - the name of the resource group. +// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot +// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +func (client SnapshotsClient) RevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsRevokeAccessFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.RevokeAccess") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + result, err = client.RevokeAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// RevokeAccessPreparer prepares the RevokeAccess request. 
+func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future SnapshotsRevokeAccessFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a snapshot. +// Parameters: +// resourceGroupName - the name of the resource group. +// snapshotName - the name of the snapshot that is being created. The name can't be changed after the snapshot +// is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters. +// snapshot - snapshot object supplied in the body of the Patch snapshot operation. +func (client SnapshotsClient) Update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (result SnapshotsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/usage.go index dd1ea0f3b1..4d9865e0fc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/usage.go @@ -107,8 +107,8 @@ func (client UsageClient) ListPreparer(ctx context.Context, location string) (*h // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensionimages.go index 2d9f17b279..969744acd3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensionimages.go @@ -102,8 +102,8 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Contex // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -178,8 +178,8 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context. // ListTypesSender sends the ListTypes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListTypesResponder handles the response to the ListTypes request. The method always @@ -265,8 +265,8 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx conte // ListVersionsSender sends the ListVersions request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListVersionsResponder handles the response to the ListVersions request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensions.go index 65147873e6..5251fe760b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensions.go @@ -99,9 +99,9 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context. // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineExtensionsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -178,9 +178,9 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineExtensionsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -267,8 +267,8 @@ func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, re // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -343,9 +343,9 @@ func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineExtensionsUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineimages.go index e08bf1e0b8..c468426964 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineimages.go @@ -106,8 +106,8 @@ func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, locati // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. 
The method always @@ -197,8 +197,8 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -274,8 +274,8 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, // ListOffersSender sends the ListOffers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListOffersResponder handles the response to the ListOffers request. The method always @@ -349,8 +349,8 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Cont // ListPublishersSender sends the ListPublishers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListPublishersResponder handles the response to the ListPublishers request. The method always @@ -428,8 +428,8 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, l // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListSkusResponder handles the response to the ListSkus request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineruncommands.go index 93837d42f6..18790c7d5a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineruncommands.go @@ -107,8 +107,8 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, l // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -189,8 +189,8 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachines.go index 040bc713d1..f84acb1ef8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachines.go @@ -107,9 +107,9 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc // CaptureSender sends the Capture request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future VirtualMachinesCaptureFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -185,9 +185,9 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co // ConvertToManagedDisksSender sends the ConvertToManagedDisks request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (future VirtualMachinesConvertToManagedDisksFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -287,9 +287,9 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachinesCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) 
if err != nil { return } @@ -365,9 +365,9 @@ func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, reso // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future VirtualMachinesDeallocateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -442,9 +442,9 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future VirtualMachinesDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -525,8 +525,8 @@ func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, reso // GeneralizeSender sends the Generalize request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GeneralizeResponder handles the response to the Generalize request. The method always @@ -606,8 +606,8 @@ func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGro // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -687,8 +687,8 @@ func (client VirtualMachinesClient) GetExtensionsPreparer(ctx context.Context, r // GetExtensionsSender sends the GetExtensions request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GetExtensionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetExtensionsResponder handles the response to the GetExtensions request. The method always @@ -764,8 +764,8 @@ func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, re // InstanceViewSender sends the InstanceView request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // InstanceViewResponder handles the response to the InstanceView request. The method always @@ -841,8 +841,8 @@ func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGr // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -952,8 +952,8 @@ func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context) (*http. // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListAllResponder handles the response to the ListAll request. The method always @@ -1066,8 +1066,8 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Conte // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always @@ -1148,8 +1148,8 @@ func (client VirtualMachinesClient) ListByLocationPreparer(ctx context.Context, // ListByLocationSender sends the ListByLocation request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListByLocationSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListByLocationResponder handles the response to the ListByLocation request. The method always @@ -1256,9 +1256,9 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte // PerformMaintenanceSender sends the PerformMaintenance request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachinesPerformMaintenanceFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1334,9 +1334,9 @@ func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resour // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future VirtualMachinesPowerOffFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1357,7 +1357,7 @@ func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (resu return } -// Redeploy the operation to redeploy a virtual machine. +// Redeploy shuts down the virtual machine, moves it to a new node, and powers it back on. // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. @@ -1411,9 +1411,9 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour // RedeploySender sends the Redeploy request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future VirtualMachinesRedeployFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1488,9 +1488,9 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) RestartSender(req *http.Request) (future VirtualMachinesRestartFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1574,9 +1574,9 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso // RunCommandSender sends the RunCommand request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future VirtualMachinesRunCommandFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1651,9 +1651,9 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG // StartSender sends the Start request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) StartSender(req *http.Request) (future VirtualMachinesStartFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1731,9 +1731,9 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future VirtualMachinesUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetextensions.go index 8e4d578d04..4471f46440 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetextensions.go @@ -100,9 +100,9 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -179,9 +179,9 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context. // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetExtensionsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -268,8 +268,8 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Con // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -346,8 +346,8 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Co // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetrollingupgrades.go index b6d7e4b6de..2b44c3eeb4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetrollingupgrades.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetrollingupgrades.go @@ -96,9 +96,9 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con // CancelSender sends the Cancel request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -179,8 +179,8 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx // GetLatestSender sends the GetLatest request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetLatestResponder handles the response to the GetLatest request. The method always @@ -251,9 +251,9 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer // StartOSUpgradeSender sends the StartOSUpgrade request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesets.go index c63300f877..a4dab609e1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesets.go @@ -121,9 +121,9 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -205,9 +205,9 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetsDeallocateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -282,9 +282,9 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -368,9 +368,9 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context. // DeleteInstancesSender sends the DeleteInstances request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (future VirtualMachineScaleSetsDeleteInstancesFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -454,8 +454,8 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp // ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender sends the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder handles the response to the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method always @@ -531,8 +531,8 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, res // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -608,8 +608,8 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context. // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetInstanceViewResponder handles the response to the GetInstanceView request. The method always @@ -686,8 +686,8 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx cont // GetOSUpgradeHistorySender sends the GetOSUpgradeHistory request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistorySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetOSUpgradeHistoryResponder handles the response to the GetOSUpgradeHistory request. The method always @@ -799,8 +799,8 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, re // ListSender sends the List request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -911,8 +911,8 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListAllResponder handles the response to the ListAll request. The method always @@ -1027,8 +1027,8 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListSkusResponder handles the response to the ListSkus request. The method always @@ -1143,9 +1143,9 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx conte // PerformMaintenanceSender sends the PerformMaintenance request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetsPerformMaintenanceFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1227,9 +1227,9 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetsPowerOffFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1250,7 +1250,8 @@ func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Respons return } -// Redeploy redeploy one or more virtual machines in a VM scale set. +// Redeploy shuts down all the virtual machines in the virtual machine scale set, moves them to a new node, and powers +// them back on. // Parameters: // resourceGroupName - the name of the resource group. 
// VMScaleSetName - the name of the VM scale set. @@ -1310,9 +1311,9 @@ func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context // RedeploySender sends the Redeploy request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetsRedeployFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1393,9 +1394,9 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, // ReimageSender sends the Reimage request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetsReimageFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1477,9 +1478,9 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte // ReimageAllSender sends the ReimageAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetsReimageAllFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1560,9 +1561,9 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetsRestartFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1643,9 +1644,9 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetsStartFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1723,9 +1724,9 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, // UpdateSender sends the Update request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetsUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1809,9 +1810,9 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context. // UpdateInstancesSender sends the UpdateInstances request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (future VirtualMachineScaleSetsUpdateInstancesFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetvms.go index ee4dc7382f..ec5de8760a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetvms.go @@ -99,9 +99,9 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetVMsDeallocateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -178,9 +178,9 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -263,8 +263,8 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -342,8 +342,8 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // GetInstanceViewResponder handles the response to the GetInstanceView request. The method always @@ -432,8 +432,8 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -542,9 +542,9 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx con // PerformMaintenanceSender sends the PerformMaintenance request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -622,9 +622,9 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetVMsPowerOffFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -645,7 +645,8 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Respo return } -// Redeploy redeploys a virtual machine in a VM scale set. +// Redeploy shuts down the virtual machine in the virtual machine scale set, moves it to a new node, and powers it back +// on. // Parameters: // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. 
@@ -701,9 +702,9 @@ func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Conte // RedeploySender sends the Redeploy request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetVMsRedeployFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -780,9 +781,9 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex // ReimageSender sends the Reimage request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -860,9 +861,9 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con // ReimageAllSender sends the ReimageAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageAllFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -939,9 +940,9 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetVMsRestartFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1018,9 +1019,9 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetVMsStartFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } @@ -1124,9 +1125,9 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMsUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + resp, err = autorest.SendWithSender(client, req, sd...) if err != nil { return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinesizes.go index 9864afe621..d15befbc62 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinesizes.go @@ -105,8 +105,8 @@ func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, locati // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go index 36a3275fda..fab7676fa8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go @@ -110,8 +110,8 @@ func (client ApplicationsClient) AddOwnerPreparer(ctx context.Context, applicati // AddOwnerSender sends the AddOwner request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) AddOwnerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // AddOwnerResponder handles the response to the AddOwner request. The method always @@ -191,8 +191,8 @@ func (client ApplicationsClient) CreatePreparer(ctx context.Context, parameters // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. 
The method always @@ -266,8 +266,8 @@ func (client ApplicationsClient) DeletePreparer(ctx context.Context, application // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always @@ -340,8 +340,8 @@ func (client ApplicationsClient) GetPreparer(ctx context.Context, applicationObj // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -415,8 +415,8 @@ func (client ApplicationsClient) GetServicePrincipalsIDByAppIDPreparer(ctx conte // GetServicePrincipalsIDByAppIDSender sends the GetServicePrincipalsIDByAppID request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) GetServicePrincipalsIDByAppIDSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetServicePrincipalsIDByAppIDResponder handles the response to the GetServicePrincipalsIDByAppID request. The method always @@ -498,8 +498,8 @@ func (client ApplicationsClient) ListPreparer(ctx context.Context, filter string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -589,8 +589,8 @@ func (client ApplicationsClient) ListKeyCredentialsPreparer(ctx context.Context, // ListKeyCredentialsSender sends the ListKeyCredentials request. The method will close the // http.Response Body if it receives an error. 
func (client ApplicationsClient) ListKeyCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListKeyCredentialsResponder handles the response to the ListKeyCredentials request. The method always @@ -664,8 +664,8 @@ func (client ApplicationsClient) ListNextPreparer(ctx context.Context, nextLink // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListNextResponder handles the response to the ListNext request. The method always @@ -740,8 +740,8 @@ func (client ApplicationsClient) ListOwnersPreparer(ctx context.Context, applica // ListOwnersSender sends the ListOwners request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListOwnersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListOwnersResponder handles the response to the ListOwners request. The method always @@ -852,8 +852,8 @@ func (client ApplicationsClient) ListPasswordCredentialsPreparer(ctx context.Con // ListPasswordCredentialsSender sends the ListPasswordCredentials request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListPasswordCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListPasswordCredentialsResponder handles the response to the ListPasswordCredentials request. The method always @@ -930,8 +930,8 @@ func (client ApplicationsClient) PatchPreparer(ctx context.Context, applicationO // PatchSender sends the Patch request. The method will close the // http.Response Body if it receives an error. 
func (client ApplicationsClient) PatchSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // PatchResponder handles the response to the Patch request. The method always @@ -1006,8 +1006,8 @@ func (client ApplicationsClient) RemoveOwnerPreparer(ctx context.Context, applic // RemoveOwnerSender sends the RemoveOwner request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) RemoveOwnerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RemoveOwnerResponder handles the response to the RemoveOwner request. The method always @@ -1083,8 +1083,8 @@ func (client ApplicationsClient) UpdateKeyCredentialsPreparer(ctx context.Contex // UpdateKeyCredentialsSender sends the UpdateKeyCredentials request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) UpdateKeyCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateKeyCredentialsResponder handles the response to the UpdateKeyCredentials request. The method always @@ -1160,8 +1160,8 @@ func (client ApplicationsClient) UpdatePasswordCredentialsPreparer(ctx context.C // UpdatePasswordCredentialsSender sends the UpdatePasswordCredentials request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) UpdatePasswordCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdatePasswordCredentialsResponder handles the response to the UpdatePasswordCredentials request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go index 1107dff959..fe3b2da5db 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go @@ -99,8 +99,8 @@ func (client DeletedApplicationsClient) HardDeletePreparer(ctx context.Context, // HardDeleteSender sends the HardDelete request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) HardDeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // HardDeleteResponder handles the response to the HardDelete request. The method always @@ -181,8 +181,8 @@ func (client DeletedApplicationsClient) ListPreparer(ctx context.Context, filter // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -272,8 +272,8 @@ func (client DeletedApplicationsClient) ListNextPreparer(ctx context.Context, ne // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) ListNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListNextResponder handles the response to the ListNext request. The method always @@ -347,8 +347,8 @@ func (client DeletedApplicationsClient) RestorePreparer(ctx context.Context, obj // RestoreSender sends the Restore request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) RestoreSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RestoreResponder handles the response to the Restore request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go index e19a6fe06f..93c6cac1e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go @@ -98,8 +98,8 @@ func (client DomainsClient) GetPreparer(ctx context.Context, domainName string) // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -175,8 +175,8 @@ func (client DomainsClient) ListPreparer(ctx context.Context, filter string) (*h // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go index 3f0e2097dd..4ed37e94f0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go @@ -110,8 +110,8 @@ func (client GroupsClient) AddMemberPreparer(ctx context.Context, groupObjectID // AddMemberSender sends the AddMember request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) AddMemberSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // AddMemberResponder handles the response to the AddMember request. The method always @@ -194,8 +194,8 @@ func (client GroupsClient) AddOwnerPreparer(ctx context.Context, objectID string // AddOwnerSender sends the AddOwner request. The method will close the // http.Response Body if it receives an error. 
func (client GroupsClient) AddOwnerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // AddOwnerResponder handles the response to the AddOwner request. The method always @@ -278,8 +278,8 @@ func (client GroupsClient) CreatePreparer(ctx context.Context, parameters GroupC // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. The method always @@ -353,8 +353,8 @@ func (client GroupsClient) DeletePreparer(ctx context.Context, objectID string) // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always @@ -427,8 +427,8 @@ func (client GroupsClient) GetPreparer(ctx context.Context, objectID string) (*h // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -508,8 +508,8 @@ func (client GroupsClient) GetGroupMembersPreparer(ctx context.Context, objectID // GetGroupMembersSender sends the GetGroupMembers request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetGroupMembersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) 
} // GetGroupMembersResponder handles the response to the GetGroupMembers request. The method always @@ -599,8 +599,8 @@ func (client GroupsClient) GetGroupMembersNextPreparer(ctx context.Context, next // GetGroupMembersNextSender sends the GetGroupMembersNext request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetGroupMembersNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetGroupMembersNextResponder handles the response to the GetGroupMembersNext request. The method always @@ -683,8 +683,8 @@ func (client GroupsClient) GetMemberGroupsPreparer(ctx context.Context, objectID // GetMemberGroupsSender sends the GetMemberGroups request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetMemberGroupsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetMemberGroupsResponder handles the response to the GetMemberGroups request. The method always @@ -767,8 +767,8 @@ func (client GroupsClient) IsMemberOfPreparer(ctx context.Context, parameters Ch // IsMemberOfSender sends the IsMemberOf request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) IsMemberOfSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // IsMemberOfResponder handles the response to the IsMemberOf request. The method always @@ -850,8 +850,8 @@ func (client GroupsClient) ListPreparer(ctx context.Context, filter string) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -941,8 +941,8 @@ func (client GroupsClient) ListNextPreparer(ctx context.Context, nextLink string // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. 
func (client GroupsClient) ListNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListNextResponder handles the response to the ListNext request. The method always @@ -1017,8 +1017,8 @@ func (client GroupsClient) ListOwnersPreparer(ctx context.Context, objectID stri // ListOwnersSender sends the ListOwners request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) ListOwnersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListOwnersResponder handles the response to the ListOwners request. The method always @@ -1131,8 +1131,8 @@ func (client GroupsClient) RemoveMemberPreparer(ctx context.Context, groupObject // RemoveMemberSender sends the RemoveMember request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) RemoveMemberSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RemoveMemberResponder handles the response to the RemoveMember request. The method always @@ -1207,8 +1207,8 @@ func (client GroupsClient) RemoveOwnerPreparer(ctx context.Context, objectID str // RemoveOwnerSender sends the RemoveOwner request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) RemoveOwnerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RemoveOwnerResponder handles the response to the RemoveOwner request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go index 3d1ec66eb6..04ddbb3e89 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go @@ -103,8 +103,8 @@ func (client OAuth2PermissionGrantClient) CreatePreparer(ctx context.Context, bo // CreateSender sends the Create request. 
The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. The method always @@ -178,8 +178,8 @@ func (client OAuth2PermissionGrantClient) DeletePreparer(ctx context.Context, ob // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always @@ -260,8 +260,8 @@ func (client OAuth2PermissionGrantClient) ListPreparer(ctx context.Context, filt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -351,8 +351,8 @@ func (client OAuth2PermissionGrantClient) ListNextPreparer(ctx context.Context, // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) ListNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListNextResponder handles the response to the ListNext request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go index 04d3cc6395..3f6ca5c4bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go @@ -107,8 +107,8 @@ func (client ObjectsClient) GetObjectsByObjectIdsPreparer(ctx context.Context, p // GetObjectsByObjectIdsSender sends the GetObjectsByObjectIds request. 
The method will close the // http.Response Body if it receives an error. func (client ObjectsClient) GetObjectsByObjectIdsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetObjectsByObjectIdsResponder handles the response to the GetObjectsByObjectIds request. The method always @@ -198,8 +198,8 @@ func (client ObjectsClient) GetObjectsByObjectIdsNextPreparer(ctx context.Contex // GetObjectsByObjectIdsNextSender sends the GetObjectsByObjectIdsNext request. The method will close the // http.Response Body if it receives an error. func (client ObjectsClient) GetObjectsByObjectIdsNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetObjectsByObjectIdsNextResponder handles the response to the GetObjectsByObjectIdsNext request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go index bca9e9ed2e..45099830b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go @@ -107,8 +107,8 @@ func (client ServicePrincipalsClient) CreatePreparer(ctx context.Context, parame // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. The method always @@ -182,8 +182,8 @@ func (client ServicePrincipalsClient) DeletePreparer(ctx context.Context, object // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -256,8 +256,8 @@ func (client ServicePrincipalsClient) GetPreparer(ctx context.Context, objectID // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -339,8 +339,8 @@ func (client ServicePrincipalsClient) ListPreparer(ctx context.Context, filter s // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -430,8 +430,8 @@ func (client ServicePrincipalsClient) ListKeyCredentialsPreparer(ctx context.Con // ListKeyCredentialsSender sends the ListKeyCredentials request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListKeyCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListKeyCredentialsResponder handles the response to the ListKeyCredentials request. The method always @@ -505,8 +505,8 @@ func (client ServicePrincipalsClient) ListNextPreparer(ctx context.Context, next // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListNextResponder handles the response to the ListNext request. The method always @@ -581,8 +581,8 @@ func (client ServicePrincipalsClient) ListOwnersPreparer(ctx context.Context, ob // ListOwnersSender sends the ListOwners request. The method will close the // http.Response Body if it receives an error. 
func (client ServicePrincipalsClient) ListOwnersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListOwnersResponder handles the response to the ListOwners request. The method always @@ -693,8 +693,8 @@ func (client ServicePrincipalsClient) ListPasswordCredentialsPreparer(ctx contex // ListPasswordCredentialsSender sends the ListPasswordCredentials request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListPasswordCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListPasswordCredentialsResponder handles the response to the ListPasswordCredentials request. The method always @@ -771,8 +771,8 @@ func (client ServicePrincipalsClient) UpdatePreparer(ctx context.Context, object // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateResponder handles the response to the Update request. The method always @@ -848,8 +848,8 @@ func (client ServicePrincipalsClient) UpdateKeyCredentialsPreparer(ctx context.C // UpdateKeyCredentialsSender sends the UpdateKeyCredentials request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) UpdateKeyCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateKeyCredentialsResponder handles the response to the UpdateKeyCredentials request. The method always @@ -925,8 +925,8 @@ func (client ServicePrincipalsClient) UpdatePasswordCredentialsPreparer(ctx cont // UpdatePasswordCredentialsSender sends the UpdatePasswordCredentials request. The method will close the // http.Response Body if it receives an error. 
func (client ServicePrincipalsClient) UpdatePasswordCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdatePasswordCredentialsResponder handles the response to the UpdatePasswordCredentials request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go index 3b89fca32d..057658eefd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go @@ -96,8 +96,8 @@ func (client SignedInUserClient) GetPreparer(ctx context.Context) (*http.Request // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SignedInUserClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -174,8 +174,8 @@ func (client SignedInUserClient) ListOwnedObjectsPreparer(ctx context.Context) ( // ListOwnedObjectsSender sends the ListOwnedObjects request. The method will close the // http.Response Body if it receives an error. func (client SignedInUserClient) ListOwnedObjectsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListOwnedObjectsResponder handles the response to the ListOwnedObjects request. The method always @@ -265,8 +265,8 @@ func (client SignedInUserClient) ListOwnedObjectsNextPreparer(ctx context.Contex // ListOwnedObjectsNextSender sends the ListOwnedObjectsNext request. The method will close the // http.Response Body if it receives an error. func (client SignedInUserClient) ListOwnedObjectsNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListOwnedObjectsNextResponder handles the response to the ListOwnedObjectsNext request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go index a2ef210a27..3c688fe7ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go @@ -112,8 +112,8 @@ func (client UsersClient) CreatePreparer(ctx context.Context, parameters UserCre // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. The method always @@ -187,8 +187,8 @@ func (client UsersClient) DeletePreparer(ctx context.Context, upnOrObjectID stri // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always @@ -261,8 +261,8 @@ func (client UsersClient) GetPreparer(ctx context.Context, upnOrObjectID string) // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -345,8 +345,8 @@ func (client UsersClient) GetMemberGroupsPreparer(ctx context.Context, objectID // GetMemberGroupsSender sends the GetMemberGroups request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) GetMemberGroupsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetMemberGroupsResponder handles the response to the GetMemberGroups request. 
The method always @@ -428,8 +428,8 @@ func (client UsersClient) ListPreparer(ctx context.Context, filter string) (*htt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always @@ -519,8 +519,8 @@ func (client UsersClient) ListNextPreparer(ctx context.Context, nextLink string) // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) ListNextSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListNextResponder handles the response to the ListNext request. The method always @@ -597,8 +597,8 @@ func (client UsersClient) UpdatePreparer(ctx context.Context, upnOrObjectID stri // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) UpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateResponder handles the response to the Update request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go similarity index 79% rename from vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go index 2cd269df66..2ca7fd7754 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go @@ -1,4 +1,4 @@ -// Package keyvault implements the Azure ARM Keyvault service API version 2016-10-01. +// Package keyvault implements the Azure ARM Keyvault service API version 7.0. // // The key vault client performs cryptographic key operations and vault operations against the Key Vault service. package keyvault @@ -46,6 +46,86 @@ func NewWithoutDefaults() BaseClient { } } +// BackupCertificate requests that a backup of the specified certificate be downloaded to the client. All versions of +// the certificate will be downloaded. This operation requires the certificates/backup permission. 
+// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// certificateName - the name of the certificate. +func (client BaseClient) BackupCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result BackupCertificateResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.BackupCertificatePreparer(ctx, vaultBaseURL, certificateName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.BackupCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure sending request") + return + } + + result, err = client.BackupCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure responding to request") + } + + return +} + +// BackupCertificatePreparer prepares the BackupCertificate request. +func (client BaseClient) BackupCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "certificate-name": autorest.Encode("path", certificateName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/certificates/{certificate-name}/backup", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// BackupCertificateSender sends the BackupCertificate request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) BackupCertificateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// BackupCertificateResponder handles the response to the BackupCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) BackupCertificateResponder(resp *http.Response) (result BackupCertificateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // BackupKey the Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation // does NOT return key material in a form that can be used outside the Azure Key Vault system, the returned key // material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. 
The intent of this operation is @@ -100,7 +180,7 @@ func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL str "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -116,8 +196,8 @@ func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL str // BackupKeySender sends the BackupKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) BackupKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // BackupKeyResponder handles the response to the BackupKey request. The method always @@ -180,7 +260,7 @@ func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL "secret-name": autorest.Encode("path", secretName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -196,8 +276,8 @@ func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL // BackupSecretSender sends the BackupSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) BackupSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // BackupSecretResponder handles the response to the BackupSecret request. The method always @@ -213,6 +293,86 @@ func (client BaseClient) BackupSecretResponder(resp *http.Response) (result Back return } +// BackupStorageAccount requests that a backup of the specified storage account be downloaded to the client. This +// operation requires the storage/backup permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. 
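BackupCertificate and BackupStorageAccount are new operations in the 7.0 data-plane surface (note that the package also moves from services/keyvault/2016-10-01/keyvault to services/keyvault/v7.0/keyvault, so import paths change); each POSTs to the corresponding /backup endpoint and returns the protected backup blob. A small usage sketch, assuming an already-configured autorest.Authorizer and that BackupCertificateResult carries the blob in a Value field like the other generated result types:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
	"github.com/Azure/go-autorest/autorest"
)

func backupCertificate(authorizer autorest.Authorizer) error {
	client := keyvault.NewWithoutDefaults()
	client.Authorizer = authorizer

	// Requires the certificates/backup permission on the vault's access policy.
	res, err := client.BackupCertificate(context.Background(),
		"https://myvault.vault.azure.net", "my-cert")
	if err != nil {
		return err
	}
	if res.Value != nil {
		// Value is assumed to hold the base64url-encoded backup blob.
		fmt.Printf("backup blob: %d bytes\n", len(*res.Value))
	}
	return nil
}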
+func (client BaseClient) BackupStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result BackupStorageResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.BackupStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.BackupStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.BackupStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure responding to request") + } + + return +} + +// BackupStorageAccountPreparer prepares the BackupStorageAccount request. +func (client BaseClient) BackupStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/storage/{storage-account-name}/backup", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// BackupStorageAccountSender sends the BackupStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) BackupStorageAccountSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// BackupStorageAccountResponder handles the response to the BackupStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) BackupStorageAccountResponder(resp *http.Response) (result BackupStorageResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // CreateCertificate if this is the first version, the certificate resource is created. This operation requires the // certificates/create permission. 
// Parameters: @@ -274,7 +434,7 @@ func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBas "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -292,8 +452,8 @@ func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBas // CreateCertificateSender sends the CreateCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) CreateCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateCertificateResponder handles the response to the CreateCertificate request. The method always @@ -363,7 +523,7 @@ func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL str "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -381,8 +541,8 @@ func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL str // CreateKeySender sends the CreateKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) CreateKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateKeyResponder handles the response to the CreateKey request. The method always @@ -457,7 +617,7 @@ func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL strin "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -475,8 +635,8 @@ func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL strin // DecryptSender sends the Decrypt request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DecryptSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DecryptResponder handles the response to the Decrypt request. 
The method always @@ -540,7 +700,7 @@ func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBas "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -556,8 +716,8 @@ func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBas // DeleteCertificateSender sends the DeleteCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always @@ -615,7 +775,7 @@ func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -631,8 +791,8 @@ func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, // DeleteCertificateContactsSender sends the DeleteCertificateContacts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteCertificateContactsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteCertificateContactsResponder handles the response to the DeleteCertificateContacts request. The method always @@ -695,7 +855,7 @@ func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, va "issuer-name": autorest.Encode("path", issuerName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -711,8 +871,8 @@ func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, va // DeleteCertificateIssuerSender sends the DeleteCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteCertificateIssuerResponder handles the response to the DeleteCertificateIssuer request. 
The method always @@ -775,7 +935,7 @@ func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -791,8 +951,8 @@ func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, // DeleteCertificateOperationSender sends the DeleteCertificateOperation request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteCertificateOperationSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteCertificateOperationResponder handles the response to the DeleteCertificateOperation request. The method always @@ -856,7 +1016,7 @@ func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL str "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -872,8 +1032,8 @@ func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL str // DeleteKeySender sends the DeleteKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteKeyResponder handles the response to the DeleteKey request. The method always @@ -895,7 +1055,7 @@ func (client BaseClient) DeleteKeyResponder(resp *http.Response) (result Deleted // vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. // storageAccountName - the name of the storage account. // sasDefinitionName - the name of the SAS definition. -func (client BaseClient) DeleteSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { +func (client BaseClient) DeleteSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSasDefinition") defer func() { @@ -946,7 +1106,7 @@ func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultB "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -962,13 +1122,13 @@ func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultB // DeleteSasDefinitionSender sends the DeleteSasDefinition request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) DeleteSasDefinitionSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteSasDefinitionResponder handles the response to the DeleteSasDefinition request. The method always // closes the http.Response Body. -func (client BaseClient) DeleteSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { +func (client BaseClient) DeleteSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { err = autorest.Respond( resp, client.ByInspecting(), @@ -1026,7 +1186,7 @@ func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL "secret-name": autorest.Encode("path", secretName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1042,8 +1202,8 @@ func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL // DeleteSecretSender sends the DeleteSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteSecretResponder handles the response to the DeleteSecret request. The method always @@ -1063,7 +1223,7 @@ func (client BaseClient) DeleteSecretResponder(resp *http.Response) (result Dele // Parameters: // vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. // storageAccountName - the name of the storage account. -func (client BaseClient) DeleteStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { +func (client BaseClient) DeleteStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteStorageAccount") defer func() { @@ -1111,7 +1271,7 @@ func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vault "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1127,13 +1287,13 @@ func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vault // DeleteStorageAccountSender sends the DeleteStorageAccount request. The method will close the // http.Response Body if it receives an error. 
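Alongside the blanket APIVersion bump from "2016-10-01" to "7.0", the managed-storage delete operations now surface soft-delete metadata: DeleteSasDefinition returns DeletedSasDefinitionBundle and DeleteStorageAccount (just below) returns DeletedStorageBundle instead of the live bundle types. A sketch of the adjusted call site, assuming the deleted bundle exposes a RecoveryID pointer the way the deleted key, secret, and certificate bundles do:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
)

func deleteStorageAccount(client keyvault.BaseClient, vaultURL, account string) error {
	// Return type changed from StorageBundle to DeletedStorageBundle in 7.0.
	deleted, err := client.DeleteStorageAccount(context.Background(), vaultURL, account)
	if err != nil {
		return err
	}
	// RecoveryID is assumed by analogy with the other Deleted* bundles; it is
	// only populated when soft delete is enabled on the vault.
	if deleted.RecoveryID != nil {
		fmt.Println("recoverable at:", *deleted.RecoveryID)
	}
	return nil
}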
func (client BaseClient) DeleteStorageAccountSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteStorageAccountResponder handles the response to the DeleteStorageAccount request. The method always // closes the http.Response Body. -func (client BaseClient) DeleteStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { +func (client BaseClient) DeleteStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { err = autorest.Respond( resp, client.ByInspecting(), @@ -1205,7 +1365,7 @@ func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL strin "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1223,8 +1383,8 @@ func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL strin // EncryptSender sends the Encrypt request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) EncryptSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // EncryptResponder handles the response to the Encrypt request. The method always @@ -1289,7 +1449,7 @@ func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseUR "certificate-version": autorest.Encode("path", certificateVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1305,8 +1465,8 @@ func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseUR // GetCertificateSender sends the GetCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetCertificateResponder handles the response to the GetCertificate request. The method always @@ -1364,7 +1524,7 @@ func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vau "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1380,8 +1540,8 @@ func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vau // GetCertificateContactsSender sends the GetCertificateContacts request. 
The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateContactsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetCertificateContactsResponder handles the response to the GetCertificateContacts request. The method always @@ -1444,7 +1604,7 @@ func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vault "issuer-name": autorest.Encode("path", issuerName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1460,8 +1620,8 @@ func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vault // GetCertificateIssuerSender sends the GetCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetCertificateIssuerResponder handles the response to the GetCertificateIssuer request. The method always @@ -1531,7 +1691,7 @@ func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaul "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1550,8 +1710,8 @@ func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaul // GetCertificateIssuersSender sends the GetCertificateIssuers request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateIssuersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetCertificateIssuersResponder handles the response to the GetCertificateIssuers request. The method always @@ -1651,7 +1811,7 @@ func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, va "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1667,8 +1827,8 @@ func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, va // GetCertificateOperationSender sends the GetCertificateOperation request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) GetCertificateOperationSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetCertificateOperationResponder handles the response to the GetCertificateOperation request. The method always @@ -1731,7 +1891,7 @@ func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vault "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1747,8 +1907,8 @@ func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vault // GetCertificatePolicySender sends the GetCertificatePolicy request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificatePolicySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetCertificatePolicyResponder handles the response to the GetCertificatePolicy request. The method always @@ -1770,7 +1930,8 @@ func (client BaseClient) GetCertificatePolicyResponder(resp *http.Response) (res // vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. // maxresults - maximum number of results to return in a page. If not specified the service will return up to // 25 results. -func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateListResultPage, err error) { +// includePending - specifies whether to include certificates which are not completely provisioned. +func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") defer func() { @@ -1791,7 +1952,7 @@ func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL strin } result.fn = client.getCertificatesNextResults - req, err := client.GetCertificatesPreparer(ctx, vaultBaseURL, maxresults) + req, err := client.GetCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) if err != nil { err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", nil, "Failure preparing request") return @@ -1813,18 +1974,21 @@ func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL strin } // GetCertificatesPreparer prepares the GetCertificates request. 
-func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { +func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { urlParameters := map[string]interface{}{ "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } if maxresults != nil { queryParameters["maxresults"] = autorest.Encode("query", *maxresults) } + if includePending != nil { + queryParameters["includePending"] = autorest.Encode("query", *includePending) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -1837,8 +2001,8 @@ func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseU // GetCertificatesSender sends the GetCertificates request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetCertificatesResponder handles the response to the GetCertificates request. The method always @@ -1876,7 +2040,7 @@ func (client BaseClient) getCertificatesNextResults(ctx context.Context, lastRes } // GetCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateListResultIterator, err error) { +func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") defer func() { @@ -1887,7 +2051,7 @@ func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseU tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.GetCertificates(ctx, vaultBaseURL, maxresults) + result.page, err = client.GetCertificates(ctx, vaultBaseURL, maxresults, includePending) return } @@ -1950,7 +2114,7 @@ func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vau "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1969,8 +2133,8 @@ func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vau // GetCertificateVersionsSender sends the GetCertificateVersions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateVersionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) 
} // GetCertificateVersionsResponder handles the response to the GetCertificateVersions request. The method always @@ -2071,7 +2235,7 @@ func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaul "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2087,8 +2251,8 @@ func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaul // GetDeletedCertificateSender sends the GetDeletedCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetDeletedCertificateResponder handles the response to the GetDeletedCertificate request. The method always @@ -2112,7 +2276,8 @@ func (client BaseClient) GetDeletedCertificateResponder(resp *http.Response) (re // vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. // maxresults - maximum number of results to return in a page. If not specified the service will return up to // 25 results. -func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedCertificateListResultPage, err error) { +// includePending - specifies whether to include certificates which are not completely provisioned. +func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") defer func() { @@ -2133,7 +2298,7 @@ func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseUR } result.fn = client.getDeletedCertificatesNextResults - req, err := client.GetDeletedCertificatesPreparer(ctx, vaultBaseURL, maxresults) + req, err := client.GetDeletedCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) if err != nil { err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", nil, "Failure preparing request") return @@ -2155,18 +2320,21 @@ func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseUR } // GetDeletedCertificatesPreparer prepares the GetDeletedCertificates request. 
-func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { +func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { urlParameters := map[string]interface{}{ "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } if maxresults != nil { queryParameters["maxresults"] = autorest.Encode("query", *maxresults) } + if includePending != nil { + queryParameters["includePending"] = autorest.Encode("query", *includePending) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -2179,8 +2347,8 @@ func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vau // GetDeletedCertificatesSender sends the GetDeletedCertificates request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedCertificatesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetDeletedCertificatesResponder handles the response to the GetDeletedCertificates request. The method always @@ -2218,7 +2386,7 @@ func (client BaseClient) getDeletedCertificatesNextResults(ctx context.Context, } // GetDeletedCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedCertificateListResultIterator, err error) { +func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") defer func() { @@ -2229,7 +2397,7 @@ func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vau tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.GetDeletedCertificates(ctx, vaultBaseURL, maxresults) + result.page, err = client.GetDeletedCertificates(ctx, vaultBaseURL, maxresults, includePending) return } @@ -2281,7 +2449,7 @@ func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2297,8 +2465,8 @@ func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL // GetDeletedKeySender sends the GetDeletedKey request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) GetDeletedKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetDeletedKeyResponder handles the response to the GetDeletedKey request. The method always @@ -2370,7 +2538,7 @@ func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseUR "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2389,8 +2557,8 @@ func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseUR // GetDeletedKeysSender sends the GetDeletedKeys request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedKeysSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetDeletedKeysResponder handles the response to the GetDeletedKeys request. The method always @@ -2443,6 +2611,230 @@ func (client BaseClient) GetDeletedKeysComplete(ctx context.Context, vaultBaseUR return } +// GetDeletedSasDefinition the Get Deleted SAS Definition operation returns the specified deleted SAS definition along +// with its attributes. This operation requires the storage/getsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. 
+func (client BaseClient) GetDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinition", err.Error()) + } + + req, err := client.GetDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.GetDeletedSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure responding to request") + } + + return +} + +// GetDeletedSasDefinitionPreparer prepares the GetDeletedSasDefinition request. +func (client BaseClient) GetDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedSasDefinitionSender sends the GetDeletedSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetDeletedSasDefinitionResponder handles the response to the GetDeletedSasDefinition request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetDeletedSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDeletedSasDefinitions the Get Deleted Sas Definitions operation returns the SAS definitions that have been +// deleted for a vault enabled for soft-delete. This operation requires the storage/listsas permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +func (client BaseClient) GetDeletedSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") + defer func() { + sc := -1 + if result.dsdlr.Response.Response != nil { + sc = result.dsdlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinitions", err.Error()) + } + + result.fn = client.getDeletedSasDefinitionsNextResults + req, err := client.GetDeletedSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedSasDefinitionsSender(req) + if err != nil { + result.dsdlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure sending request") + return + } + + result.dsdlr, err = client.GetDeletedSasDefinitionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure responding to request") + } + + return +} + +// GetDeletedSasDefinitionsPreparer prepares the GetDeletedSasDefinitions request. 
+func (client BaseClient) GetDeletedSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedSasDefinitionsSender sends the GetDeletedSasDefinitions request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedSasDefinitionsSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetDeletedSasDefinitionsResponder handles the response to the GetDeletedSasDefinitions request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedSasDefinitionsResponder(resp *http.Response) (result DeletedSasDefinitionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getDeletedSasDefinitionsNextResults retrieves the next set of results, if any. +func (client BaseClient) getDeletedSasDefinitionsNextResults(ctx context.Context, lastResults DeletedSasDefinitionListResult) (result DeletedSasDefinitionListResult, err error) { + req, err := lastResults.deletedSasDefinitionListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetDeletedSasDefinitionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetDeletedSasDefinitionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetDeletedSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client BaseClient) GetDeletedSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetDeletedSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults) + return +} + // GetDeletedSecret the Get Deleted Secret operation returns the specified deleted secret along with its attributes. // This operation requires the secrets/get permission. // Parameters: @@ -2490,7 +2882,7 @@ func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBase "secret-name": autorest.Encode("path", secretName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2506,8 +2898,8 @@ func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBase // GetDeletedSecretSender sends the GetDeletedSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetDeletedSecretResponder handles the response to the GetDeletedSecret request. The method always @@ -2577,7 +2969,7 @@ func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBas "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2596,8 +2988,8 @@ func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBas // GetDeletedSecretsSender sends the GetDeletedSecrets request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedSecretsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetDeletedSecretsResponder handles the response to the GetDeletedSecrets request. The method always @@ -2650,6 +3042,219 @@ func (client BaseClient) GetDeletedSecretsComplete(ctx context.Context, vaultBas return } +// GetDeletedStorageAccount the Get Deleted Storage Account operation returns the specified deleted storage account +// along with its attributes. This operation requires the storage/get permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. 
+func (client BaseClient) GetDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccount", err.Error()) + } + + req, err := client.GetDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.GetDeletedStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure responding to request") + } + + return +} + +// GetDeletedStorageAccountPreparer prepares the GetDeletedStorageAccount request. +func (client BaseClient) GetDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedStorageAccountSender sends the GetDeletedStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetDeletedStorageAccountResponder handles the response to the GetDeletedStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) GetDeletedStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetDeletedStorageAccounts the Get Deleted Storage Accounts operation returns the storage accounts that have been +// deleted for a vault enabled for soft-delete. 
This operation requires the storage/list permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// maxresults - maximum number of results to return in a page. If not specified the service will return up to +// 25 results. +func (client BaseClient) GetDeletedStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") + defer func() { + sc := -1 + if result.dslr.Response.Response != nil { + sc = result.dslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: maxresults, + Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccounts", err.Error()) + } + + result.fn = client.getDeletedStorageAccountsNextResults + req, err := client.GetDeletedStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", nil, "Failure preparing request") + return + } + + resp, err := client.GetDeletedStorageAccountsSender(req) + if err != nil { + result.dslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure sending request") + return + } + + result.dslr, err = client.GetDeletedStorageAccountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure responding to request") + } + + return +} + +// GetDeletedStorageAccountsPreparer prepares the GetDeletedStorageAccounts request. +func (client BaseClient) GetDeletedStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if maxresults != nil { + queryParameters["maxresults"] = autorest.Encode("query", *maxresults) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/deletedstorage"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetDeletedStorageAccountsSender sends the GetDeletedStorageAccounts request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) GetDeletedStorageAccountsSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetDeletedStorageAccountsResponder handles the response to the GetDeletedStorageAccounts request. The method always +// closes the http.Response Body. 
+func (client BaseClient) GetDeletedStorageAccountsResponder(resp *http.Response) (result DeletedStorageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// getDeletedStorageAccountsNextResults retrieves the next set of results, if any. +func (client BaseClient) getDeletedStorageAccountsNextResults(ctx context.Context, lastResults DeletedStorageListResult) (result DeletedStorageListResult, err error) { + req, err := lastResults.deletedStorageListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.GetDeletedStorageAccountsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure sending next results request") + } + result, err = client.GetDeletedStorageAccountsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure responding to next results request") + } + return +} + +// GetDeletedStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. +func (client BaseClient) GetDeletedStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.GetDeletedStorageAccounts(ctx, vaultBaseURL, maxresults) + return +} + // GetKey the get key operation is applicable to all key types. If the requested key is symmetric, then no key material // is released in the response. This operation requires the keys/get permission. // Parameters: @@ -2699,7 +3304,7 @@ func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2715,8 +3320,8 @@ func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string // GetKeySender sends the GetKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetKeyResponder handles the response to the GetKey request. 
The method always @@ -2788,7 +3393,7 @@ func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL strin "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2807,8 +3412,8 @@ func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL strin // GetKeysSender sends the GetKeys request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetKeysSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetKeysResponder handles the response to the GetKeys request. The method always @@ -2920,7 +3525,7 @@ func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseUR "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -2939,8 +3544,8 @@ func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseUR // GetKeyVersionsSender sends the GetKeyVersions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetKeyVersionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetKeyVersionsResponder handles the response to the GetKeyVersions request. The method always @@ -3050,7 +3655,7 @@ func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBase "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3066,8 +3671,8 @@ func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBase // GetSasDefinitionSender sends the GetSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSasDefinitionSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetSasDefinitionResponder handles the response to the GetSasDefinition request. 
The method always @@ -3144,7 +3749,7 @@ func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBas "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3163,8 +3768,8 @@ func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBas // GetSasDefinitionsSender sends the GetSasDefinitions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSasDefinitionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetSasDefinitionsResponder handles the response to the GetSasDefinitions request. The method always @@ -3266,7 +3871,7 @@ func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL str "secret-version": autorest.Encode("path", secretVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3282,8 +3887,8 @@ func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL str // GetSecretSender sends the GetSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetSecretResponder handles the response to the GetSecret request. The method always @@ -3354,7 +3959,7 @@ func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL st "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3373,8 +3978,8 @@ func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL st // GetSecretsSender sends the GetSecrets request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSecretsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetSecretsResponder handles the response to the GetSecrets request. 
The method always @@ -3486,7 +4091,7 @@ func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBas "secret-name": autorest.Encode("path", secretName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3505,8 +4110,8 @@ func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBas // GetSecretVersionsSender sends the GetSecretVersions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSecretVersionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetSecretVersionsResponder handles the response to the GetSecretVersions request. The method always @@ -3612,7 +4217,7 @@ func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBas "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3628,8 +4233,8 @@ func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBas // GetStorageAccountSender sends the GetStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetStorageAccountSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetStorageAccountResponder handles the response to the GetStorageAccount request. The method always @@ -3699,7 +4304,7 @@ func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBa "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3718,8 +4323,8 @@ func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBa // GetStorageAccountsSender sends the GetStorageAccounts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetStorageAccountsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetStorageAccountsResponder handles the response to the GetStorageAccounts request. 
The method always @@ -3835,7 +4440,7 @@ func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBas "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3853,8 +4458,8 @@ func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBas // ImportCertificateSender sends the ImportCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) ImportCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ImportCertificateResponder handles the response to the ImportCertificate request. The method always @@ -3927,7 +4532,7 @@ func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL str "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -3945,8 +4550,8 @@ func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL str // ImportKeySender sends the ImportKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) ImportKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ImportKeyResponder handles the response to the ImportKey request. The method always @@ -4016,7 +4621,7 @@ func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBase "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4034,8 +4639,8 @@ func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBase // MergeCertificateSender sends the MergeCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) MergeCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // MergeCertificateResponder handles the response to the MergeCertificate request. 
The method always @@ -4099,7 +4704,7 @@ func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, va "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4115,8 +4720,8 @@ func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, va // PurgeDeletedCertificateSender sends the PurgeDeletedCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) PurgeDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // PurgeDeletedCertificateResponder handles the response to the PurgeDeletedCertificate request. The method always @@ -4179,7 +4784,7 @@ func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseU "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4195,8 +4800,8 @@ func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseU // PurgeDeletedKeySender sends the PurgeDeletedKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) PurgeDeletedKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // PurgeDeletedKeyResponder handles the response to the PurgeDeletedKey request. The method always @@ -4259,7 +4864,7 @@ func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBa "secret-name": autorest.Encode("path", secretName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4275,8 +4880,8 @@ func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBa // PurgeDeletedSecretSender sends the PurgeDeletedSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) PurgeDeletedSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // PurgeDeletedSecretResponder handles the response to the PurgeDeletedSecret request. 
The method always @@ -4291,6 +4896,92 @@ func (client BaseClient) PurgeDeletedSecretResponder(resp *http.Response) (resul return } +// PurgeDeletedStorageAccount the purge deleted storage account operation removes the secret permanently, without the +// possibility of recovery. This operation can only be performed on a soft-delete enabled vault. This operation +// requires the storage/purge permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +func (client BaseClient) PurgeDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedStorageAccount") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "PurgeDeletedStorageAccount", err.Error()) + } + + req, err := client.PurgeDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.PurgeDeletedStorageAccountSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.PurgeDeletedStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure responding to request") + } + + return +} + +// PurgeDeletedStorageAccountPreparer prepares the PurgeDeletedStorageAccount request. +func (client BaseClient) PurgeDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PurgeDeletedStorageAccountSender sends the PurgeDeletedStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) PurgeDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// PurgeDeletedStorageAccountResponder handles the response to the PurgeDeletedStorageAccount request. 
The method always +// closes the http.Response Body. +func (client BaseClient) PurgeDeletedStorageAccountResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + // RecoverDeletedCertificate the RecoverDeletedCertificate operation performs the reversal of the Delete operation. The // operation is applicable in vaults enabled for soft-delete, and must be issued during the retention interval // (available in the deleted certificate's attributes). This operation requires the certificates/recover permission. @@ -4339,7 +5030,7 @@ func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4355,8 +5046,8 @@ func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, // RecoverDeletedCertificateSender sends the RecoverDeletedCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RecoverDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RecoverDeletedCertificateResponder handles the response to the RecoverDeletedCertificate request. The method always @@ -4421,7 +5112,7 @@ func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBas "key-name": autorest.Encode("path", keyName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4437,8 +5128,8 @@ func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBas // RecoverDeletedKeySender sends the RecoverDeletedKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RecoverDeletedKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RecoverDeletedKeyResponder handles the response to the RecoverDeletedKey request. The method always @@ -4454,6 +5145,96 @@ func (client BaseClient) RecoverDeletedKeyResponder(resp *http.Response) (result return } +// RecoverDeletedSasDefinition recovers the deleted SAS definition for the specified storage account. This operation +// can only be performed on a soft-delete enabled vault. This operation requires the storage/recover permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. +// sasDefinitionName - the name of the SAS definition. 
+func (client BaseClient) RecoverDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSasDefinition") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, + {TargetValue: sasDefinitionName, + Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedSasDefinition", err.Error()) + } + + req, err := client.RecoverDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", nil, "Failure preparing request") + return + } + + resp, err := client.RecoverDeletedSasDefinitionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure sending request") + return + } + + result, err = client.RecoverDeletedSasDefinitionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure responding to request") + } + + return +} + +// RecoverDeletedSasDefinitionPreparer prepares the RecoverDeletedSasDefinition request. +func (client BaseClient) RecoverDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "sas-definition-name": autorest.Encode("path", sasDefinitionName), + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}/recover", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RecoverDeletedSasDefinitionSender sends the RecoverDeletedSasDefinition request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RecoverDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// RecoverDeletedSasDefinitionResponder handles the response to the RecoverDeletedSasDefinition request. The method always +// closes the http.Response Body. 
+func (client BaseClient) RecoverDeletedSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // RecoverDeletedSecret recovers the deleted secret in the specified vault. This operation can only be performed on a // soft-delete enabled vault. This operation requires the secrets/recover permission. // Parameters: @@ -4501,7 +5282,7 @@ func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vault "secret-name": autorest.Encode("path", secretName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4517,8 +5298,8 @@ func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vault // RecoverDeletedSecretSender sends the RecoverDeletedSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RecoverDeletedSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always @@ -4534,6 +5315,92 @@ func (client BaseClient) RecoverDeletedSecretResponder(resp *http.Response) (res return } +// RecoverDeletedStorageAccount recovers the deleted storage account in the specified vault. This operation can only be +// performed on a soft-delete enabled vault. This operation requires the storage/recover permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// storageAccountName - the name of the storage account. 
+func (client BaseClient) RecoverDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: storageAccountName, + Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedStorageAccount", err.Error()) + } + + req, err := client.RecoverDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.RecoverDeletedStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.RecoverDeletedStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure responding to request") + } + + return +} + +// RecoverDeletedStorageAccountPreparer prepares the RecoverDeletedStorageAccount request. +func (client BaseClient) RecoverDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + pathParameters := map[string]interface{}{ + "storage-account-name": autorest.Encode("path", storageAccountName), + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPathParameters("/deletedstorage/{storage-account-name}/recover", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RecoverDeletedStorageAccountSender sends the RecoverDeletedStorageAccount request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RecoverDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// RecoverDeletedStorageAccountResponder handles the response to the RecoverDeletedStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) RecoverDeletedStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // RegenerateStorageAccountKey regenerates the specified key value for the given storage account. 
This operation // requires the storage/regeneratekey permission. // Parameters: @@ -4590,7 +5457,7 @@ func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4608,8 +5475,8 @@ func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context // RegenerateStorageAccountKeySender sends the RegenerateStorageAccountKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RegenerateStorageAccountKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RegenerateStorageAccountKeyResponder handles the response to the RegenerateStorageAccountKey request. The method always @@ -4625,6 +5492,90 @@ func (client BaseClient) RegenerateStorageAccountKeyResponder(resp *http.Respons return } +// RestoreCertificate restores a backed up certificate, and all its versions, to a vault. This operation requires the +// certificates/restore permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// parameters - the parameters to restore the certificate. +func (client BaseClient) RestoreCertificate(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (result CertificateBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreCertificate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.CertificateBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RestoreCertificate", err.Error()) + } + + req, err := client.RestoreCertificatePreparer(ctx, vaultBaseURL, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", nil, "Failure preparing request") + return + } + + resp, err := client.RestoreCertificateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure sending request") + return + } + + result, err = client.RestoreCertificateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure responding to request") + } + + return +} + +// RestoreCertificatePreparer prepares the RestoreCertificate request. 
+func (client BaseClient) RestoreCertificatePreparer(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/certificates/restore"), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestoreCertificateSender sends the RestoreCertificate request. The method will close the +// http.Response Body if it receives an error. +func (client BaseClient) RestoreCertificateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// RestoreCertificateResponder handles the response to the RestoreCertificate request. The method always +// closes the http.Response Body. +func (client BaseClient) RestoreCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // RestoreKey imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, // attributes and access control policies. The RESTORE operation may be used to import a previously backed up key. // Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as it @@ -4681,7 +5632,7 @@ func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL st "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4699,8 +5650,8 @@ func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL st // RestoreKeySender sends the RestoreKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RestoreKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RestoreKeyResponder handles the response to the RestoreKey request. The method always @@ -4765,7 +5716,7 @@ func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4783,8 +5734,8 @@ func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL // RestoreSecretSender sends the RestoreSecret request. 
The method will close the // http.Response Body if it receives an error. func (client BaseClient) RestoreSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // RestoreSecretResponder handles the response to the RestoreSecret request. The method always @@ -4800,6 +5751,90 @@ func (client BaseClient) RestoreSecretResponder(resp *http.Response) (result Sec return } +// RestoreStorageAccount restores a backed up storage account to a vault. This operation requires the storage/restore +// permission. +// Parameters: +// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. +// parameters - the parameters to restore the storage account. +func (client BaseClient) RestoreStorageAccount(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (result StorageBundle, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreStorageAccount") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.StorageBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("keyvault.BaseClient", "RestoreStorageAccount", err.Error()) + } + + req, err := client.RestoreStorageAccountPreparer(ctx, vaultBaseURL, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", nil, "Failure preparing request") + return + } + + resp, err := client.RestoreStorageAccountSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure sending request") + return + } + + result, err = client.RestoreStorageAccountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure responding to request") + } + + return +} + +// RestoreStorageAccountPreparer prepares the RestoreStorageAccount request. +func (client BaseClient) RestoreStorageAccountPreparer(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "vaultBaseUrl": vaultBaseURL, + } + + const APIVersion = "7.0" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), + autorest.WithPath("/storage/restore"), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestoreStorageAccountSender sends the RestoreStorageAccount request. The method will close the +// http.Response Body if it receives an error. 
+func (client BaseClient) RestoreStorageAccountSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// RestoreStorageAccountResponder handles the response to the RestoreStorageAccount request. The method always +// closes the http.Response Body. +func (client BaseClient) RestoreStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // SetCertificateContacts sets the certificate contacts for the specified key vault. This operation requires the // certificates/managecontacts permission. // Parameters: @@ -4843,7 +5878,7 @@ func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vau "vaultBaseUrl": vaultBaseURL, } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4862,8 +5897,8 @@ func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vau // SetCertificateContactsSender sends the SetCertificateContacts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetCertificateContactsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // SetCertificateContactsResponder handles the response to the SetCertificateContacts request. The method always @@ -4933,7 +5968,7 @@ func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vault "issuer-name": autorest.Encode("path", issuerName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -4951,8 +5986,8 @@ func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vault // SetCertificateIssuerSender sends the SetCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // SetCertificateIssuerResponder handles the response to the SetCertificateIssuer request. 
The method always @@ -4992,7 +6027,8 @@ func (client BaseClient) SetSasDefinition(ctx context.Context, vaultBaseURL stri {TargetValue: sasDefinitionName, Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Parameters", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + Constraints: []validation.Constraint{{Target: "parameters.TemplateURI", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ValidityPeriod", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "SetSasDefinition", err.Error()) } @@ -5028,7 +6064,7 @@ func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBase "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5046,8 +6082,8 @@ func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBase // SetSasDefinitionSender sends the SetSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetSasDefinitionSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // SetSasDefinitionResponder handles the response to the SetSasDefinition request. The method always @@ -5119,7 +6155,7 @@ func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL str "secret-name": autorest.Encode("path", secretName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5137,8 +6173,8 @@ func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL str // SetSecretSender sends the SetSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // SetSecretResponder handles the response to the SetSecret request. The method always @@ -5211,7 +6247,7 @@ func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBas "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5229,8 +6265,8 @@ func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBas // SetStorageAccountSender sends the SetStorageAccount request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) SetStorageAccountSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // SetStorageAccountResponder handles the response to the SetStorageAccount request. The method always @@ -5302,7 +6338,7 @@ func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5320,8 +6356,8 @@ func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, // SignSender sends the Sign request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SignSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // SignResponder handles the response to the Sign request. The method always @@ -5395,7 +6431,7 @@ func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL str "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5413,8 +6449,8 @@ func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL str // UnwrapKeySender sends the UnwrapKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UnwrapKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UnwrapKeyResponder handles the response to the UnwrapKey request. The method always @@ -5480,7 +6516,7 @@ func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBas "certificate-version": autorest.Encode("path", certificateVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5498,8 +6534,8 @@ func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBas // UpdateCertificateSender sends the UpdateCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateCertificateResponder handles the response to the UpdateCertificate request. The method always @@ -5563,7 +6599,7 @@ func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, va "issuer-name": autorest.Encode("path", issuerName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5581,8 +6617,8 @@ func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, va // UpdateCertificateIssuerSender sends the UpdateCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateCertificateIssuerResponder handles the response to the UpdateCertificateIssuer request. The method always @@ -5646,7 +6682,7 @@ func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5664,8 +6700,8 @@ func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, // UpdateCertificateOperationSender sends the UpdateCertificateOperation request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateCertificateOperationSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateCertificateOperationResponder handles the response to the UpdateCertificateOperation request. The method always @@ -5729,7 +6765,7 @@ func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, va "certificate-name": autorest.Encode("path", certificateName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5748,8 +6784,8 @@ func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, va // UpdateCertificatePolicySender sends the UpdateCertificatePolicy request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) UpdateCertificatePolicySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateCertificatePolicyResponder handles the response to the UpdateCertificatePolicy request. The method always @@ -5815,7 +6851,7 @@ func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL str "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5833,8 +6869,8 @@ func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL str // UpdateKeySender sends the UpdateKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateKeyResponder handles the response to the UpdateKey request. The method always @@ -5908,7 +6944,7 @@ func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultB "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -5926,8 +6962,8 @@ func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultB // UpdateSasDefinitionSender sends the UpdateSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateSasDefinitionSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateSasDefinitionResponder handles the response to the UpdateSasDefinition request. The method always @@ -5994,7 +7030,7 @@ func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL "secret-version": autorest.Encode("path", secretVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -6012,8 +7048,8 @@ func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL // UpdateSecretSender sends the UpdateSecret request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) UpdateSecretSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateSecretResponder handles the response to the UpdateSecret request. The method always @@ -6083,7 +7119,7 @@ func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vault "storage-account-name": autorest.Encode("path", storageAccountName), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -6101,8 +7137,8 @@ func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vault // UpdateStorageAccountSender sends the UpdateStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateStorageAccountSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // UpdateStorageAccountResponder handles the response to the UpdateStorageAccount request. The method always @@ -6177,7 +7213,7 @@ func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -6195,8 +7231,8 @@ func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string // VerifySender sends the Verify request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) VerifySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // VerifyResponder handles the response to the Verify request. The method always @@ -6271,7 +7307,7 @@ func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL strin "key-version": autorest.Encode("path", keyVersion), } - const APIVersion = "2016-10-01" + const APIVersion = "7.0" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -6289,8 +7325,8 @@ func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL strin // WrapKeySender sends the WrapKey request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) WrapKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // WrapKeyResponder handles the response to the WrapKey request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go similarity index 81% rename from vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/models.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go index cbfec5797c..79fed73a9c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go @@ -28,7 +28,7 @@ import ( ) // The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault" +const fqdn = "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // ActionType enumerates the values for action type. type ActionType string @@ -49,17 +49,13 @@ func PossibleActionTypeValues() []ActionType { type DeletionRecoveryLevel string const ( - // Purgeable Soft-delete is not enabled for this vault. A DELETE operation results in immediate and - // irreversible data loss. + // Purgeable ... Purgeable DeletionRecoveryLevel = "Purgeable" - // Recoverable Soft-delete is enabled for this vault and purge has been disabled. A deleted entity will - // remain in this state until recovered, or the end of the retention interval. + // Recoverable ... Recoverable DeletionRecoveryLevel = "Recoverable" - // RecoverableProtectedSubscription Soft-delete is enabled for this vault, and the subscription is - // protected against immediate deletion. + // RecoverableProtectedSubscription ... RecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription" - // RecoverablePurgeable Soft-delete is enabled for this vault; A privileged user may trigger an immediate, - // irreversible deletion(purge) of a deleted entity. + // RecoverablePurgeable ... RecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable" ) @@ -74,17 +70,17 @@ type JSONWebKeyCurveName string const ( // P256 The NIST P-256 elliptic curve, AKA SECG curve SECP256R1. P256 JSONWebKeyCurveName = "P-256" + // P256K The SECG SECP256K1 elliptic curve. + P256K JSONWebKeyCurveName = "P-256K" // P384 The NIST P-384 elliptic curve, AKA SECG curve SECP384R1. P384 JSONWebKeyCurveName = "P-384" // P521 The NIST P-521 elliptic curve, AKA SECG curve SECP521R1. P521 JSONWebKeyCurveName = "P-521" - // SECP256K1 The SECG SECP256K1 elliptic curve. - SECP256K1 JSONWebKeyCurveName = "SECP256K1" ) // PossibleJSONWebKeyCurveNameValues returns an array of possible values for the JSONWebKeyCurveName const type. func PossibleJSONWebKeyCurveNameValues() []JSONWebKeyCurveName { - return []JSONWebKeyCurveName{P256, P384, P521, SECP256K1} + return []JSONWebKeyCurveName{P256, P256K, P384, P521} } // JSONWebKeyEncryptionAlgorithm enumerates the values for json web key encryption algorithm. 
@@ -131,48 +127,51 @@ func PossibleJSONWebKeyOperationValues() []JSONWebKeyOperation { type JSONWebKeySignatureAlgorithm string const ( - // ECDSA256 ... - ECDSA256 JSONWebKeySignatureAlgorithm = "ECDSA256" - // ES256 ... + // ES256 ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. ES256 JSONWebKeySignatureAlgorithm = "ES256" - // ES384 ... + // ES256K ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518 + ES256K JSONWebKeySignatureAlgorithm = "ES256K" + // ES384 ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518 ES384 JSONWebKeySignatureAlgorithm = "ES384" - // ES512 ... + // ES512 ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518 ES512 JSONWebKeySignatureAlgorithm = "ES512" - // PS256 ... + // PS256 RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in + // https://tools.ietf.org/html/rfc7518 PS256 JSONWebKeySignatureAlgorithm = "PS256" - // PS384 ... + // PS384 RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in + // https://tools.ietf.org/html/rfc7518 PS384 JSONWebKeySignatureAlgorithm = "PS384" - // PS512 ... + // PS512 RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in + // https://tools.ietf.org/html/rfc7518 PS512 JSONWebKeySignatureAlgorithm = "PS512" - // RS256 ... + // RS256 RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518 RS256 JSONWebKeySignatureAlgorithm = "RS256" - // RS384 ... + // RS384 RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518 RS384 JSONWebKeySignatureAlgorithm = "RS384" - // RS512 ... + // RS512 RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518 RS512 JSONWebKeySignatureAlgorithm = "RS512" - // RSNULL ... + // RSNULL Reserved RSNULL JSONWebKeySignatureAlgorithm = "RSNULL" ) // PossibleJSONWebKeySignatureAlgorithmValues returns an array of possible values for the JSONWebKeySignatureAlgorithm const type. func PossibleJSONWebKeySignatureAlgorithmValues() []JSONWebKeySignatureAlgorithm { - return []JSONWebKeySignatureAlgorithm{ECDSA256, ES256, ES384, ES512, PS256, PS384, PS512, RS256, RS384, RS512, RSNULL} + return []JSONWebKeySignatureAlgorithm{ES256, ES256K, ES384, ES512, PS256, PS384, PS512, RS256, RS384, RS512, RSNULL} } // JSONWebKeyType enumerates the values for json web key type. type JSONWebKeyType string const ( - // EC ... + // EC Elliptic Curve. EC JSONWebKeyType = "EC" - // ECHSM ... + // ECHSM Elliptic Curve with a private key which is not exportable from the HSM. ECHSM JSONWebKeyType = "EC-HSM" - // Oct ... + // Oct Octet sequence (used to represent symmetric keys) Oct JSONWebKeyType = "oct" - // RSA ... + // RSA RSA (https://tools.ietf.org/html/rfc3447) RSA JSONWebKeyType = "RSA" - // RSAHSM ... + // RSAHSM RSA with a private key which is not exportable from the HSM. RSAHSM JSONWebKeyType = "RSA-HSM" ) @@ -210,6 +209,21 @@ func PossibleKeyUsageTypeValues() []KeyUsageType { return []KeyUsageType{CRLSign, DataEncipherment, DecipherOnly, DigitalSignature, EncipherOnly, KeyAgreement, KeyCertSign, KeyEncipherment, NonRepudiation} } +// SasTokenType enumerates the values for sas token type. +type SasTokenType string + +const ( + // Account ... + Account SasTokenType = "account" + // Service ... + Service SasTokenType = "service" +) + +// PossibleSasTokenTypeValues returns an array of possible values for the SasTokenType const type. 
+func PossibleSasTokenTypeValues() []SasTokenType { + return []SasTokenType{Account, Service} +} + // Action the action that will be executed. type Action struct { // ActionType - The type of the action. Possible values include: 'EmailContacts', 'AutoRenew' @@ -242,6 +256,13 @@ type Attributes struct { Updated *date.UnixTime `json:"updated,omitempty"` } +// BackupCertificateResult the backup certificate result, containing the backup blob. +type BackupCertificateResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The backup blob containing the backed up certificate. (a URL-encoded base64 string) + Value *string `json:"value,omitempty"` +} + // BackupKeyResult the backup key result, containing the backup blob. type BackupKeyResult struct { autorest.Response `json:"-"` @@ -256,6 +277,13 @@ type BackupSecretResult struct { Value *string `json:"value,omitempty"` } +// BackupStorageResult the backup storage result, containing the backup blob. +type BackupStorageResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; The backup blob containing the backed up storage account. (a URL-encoded base64 string) + Value *string `json:"value,omitempty"` +} + // CertificateAttributes the certificate management attributes. type CertificateAttributes struct { // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for certificates in the current vault. If it contains 'Purgeable', the certificate can be permanently deleted by a privileged user; otherwise, only the system can purge the certificate, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription' @@ -801,6 +829,12 @@ type CertificatePolicy struct { Attributes *CertificateAttributes `json:"attributes,omitempty"` } +// CertificateRestoreParameters the certificate restore parameters. +type CertificateRestoreParameters struct { + // CertificateBundleBackup - The backup blob associated with a certificate bundle. (a URL-encoded base64 string) + CertificateBundleBackup *string `json:"value,omitempty"` +} + // CertificateUpdateParameters the certificate update parameters. type CertificateUpdateParameters struct { // CertificatePolicy - The management policy for the certificate. @@ -1301,6 +1335,216 @@ func NewDeletedKeyListResultPage(getNextPage func(context.Context, DeletedKeyLis return DeletedKeyListResultPage{fn: getNextPage} } +// DeletedSasDefinitionBundle a deleted SAS definition bundle consisting of its previous id, attributes and +// its tags, as well as information on when it will be purged. +type DeletedSasDefinitionBundle struct { + autorest.Response `json:"-"` + // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; The SAS definition id. + ID *string `json:"id,omitempty"` + // SecretID - READ-ONLY; Storage account SAS definition secret id. + SecretID *string `json:"sid,omitempty"` + // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. 
Tokens created according to the SAS definition will have the same properties as the template. + TemplateURI *string `json:"templateUri,omitempty"` + // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` + // Attributes - READ-ONLY; The SAS definition attributes. + Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedSasDefinitionBundle. +func (dsdb DeletedSasDefinitionBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsdb.RecoveryID != nil { + objectMap["recoveryId"] = dsdb.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedSasDefinitionItem the deleted SAS definition item containing metadata about the deleted SAS +// definition. +type DeletedSasDefinitionItem struct { + // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; The storage SAS identifier. + ID *string `json:"id,omitempty"` + // SecretID - READ-ONLY; The storage account SAS definition secret id. + SecretID *string `json:"sid,omitempty"` + // Attributes - READ-ONLY; The SAS definition management attributes. + Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedSasDefinitionItem. +func (dsdi DeletedSasDefinitionItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsdi.RecoveryID != nil { + objectMap["recoveryId"] = dsdi.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedSasDefinitionListResult the deleted SAS definition list result +type DeletedSasDefinitionListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of the deleted SAS definitions in the vault along with a link to the next page of deleted sas definitions + Value *[]DeletedSasDefinitionItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of deleted SAS definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// DeletedSasDefinitionListResultIterator provides access to a complete listing of DeletedSasDefinitionItem +// values. +type DeletedSasDefinitionListResultIterator struct { + i int + page DeletedSasDefinitionListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *DeletedSasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeletedSasDefinitionListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedSasDefinitionListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedSasDefinitionListResultIterator) Response() DeletedSasDefinitionListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeletedSasDefinitionListResultIterator) Value() DeletedSasDefinitionItem { + if !iter.page.NotDone() { + return DeletedSasDefinitionItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedSasDefinitionListResultIterator type. +func NewDeletedSasDefinitionListResultIterator(page DeletedSasDefinitionListResultPage) DeletedSasDefinitionListResultIterator { + return DeletedSasDefinitionListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dsdlr DeletedSasDefinitionListResult) IsEmpty() bool { + return dsdlr.Value == nil || len(*dsdlr.Value) == 0 +} + +// deletedSasDefinitionListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dsdlr DeletedSasDefinitionListResult) deletedSasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { + if dsdlr.NextLink == nil || len(to.String(dsdlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dsdlr.NextLink))) +} + +// DeletedSasDefinitionListResultPage contains a page of DeletedSasDefinitionItem values. +type DeletedSasDefinitionListResultPage struct { + fn func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error) + dsdlr DeletedSasDefinitionListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DeletedSasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.dsdlr) + if err != nil { + return err + } + page.dsdlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedSasDefinitionListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedSasDefinitionListResultPage) NotDone() bool { + return !page.dsdlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeletedSasDefinitionListResultPage) Response() DeletedSasDefinitionListResult { + return page.dsdlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedSasDefinitionListResultPage) Values() []DeletedSasDefinitionItem { + if page.dsdlr.IsEmpty() { + return nil + } + return *page.dsdlr.Value +} + +// Creates a new instance of the DeletedSasDefinitionListResultPage type. +func NewDeletedSasDefinitionListResultPage(getNextPage func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error)) DeletedSasDefinitionListResultPage { + return DeletedSasDefinitionListResultPage{fn: getNextPage} +} + // DeletedSecretBundle a Deleted Secret consisting of its previous id, attributes and its tags, as well as // information on when it will be purged. type DeletedSecretBundle struct { @@ -1538,6 +1782,216 @@ func NewDeletedSecretListResultPage(getNextPage func(context.Context, DeletedSec return DeletedSecretListResultPage{fn: getNextPage} } +// DeletedStorageAccountItem the deleted storage account item containing metadata about the deleted storage +// account. +type DeletedStorageAccountItem struct { + // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; Storage identifier. + ID *string `json:"id,omitempty"` + // ResourceID - READ-ONLY; Storage account resource Id. + ResourceID *string `json:"resourceId,omitempty"` + // Attributes - READ-ONLY; The storage account management attributes. + Attributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedStorageAccountItem. 
+func (dsai DeletedStorageAccountItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsai.RecoveryID != nil { + objectMap["recoveryId"] = dsai.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedStorageBundle a deleted storage account bundle consisting of its previous id, attributes and its +// tags, as well as information on when it will be purged. +type DeletedStorageBundle struct { + autorest.Response `json:"-"` + // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. + RecoveryID *string `json:"recoveryId,omitempty"` + // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC + ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` + // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC + DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` + // ID - READ-ONLY; The storage account id. + ID *string `json:"id,omitempty"` + // ResourceID - READ-ONLY; The storage account resource id. + ResourceID *string `json:"resourceId,omitempty"` + // ActiveKeyName - READ-ONLY; The current active storage account key name. + ActiveKeyName *string `json:"activeKeyName,omitempty"` + // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. + AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` + // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. + RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` + // Attributes - READ-ONLY; The storage account attributes. + Attributes *StorageAccountAttributes `json:"attributes,omitempty"` + // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for DeletedStorageBundle. +func (dsb DeletedStorageBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dsb.RecoveryID != nil { + objectMap["recoveryId"] = dsb.RecoveryID + } + return json.Marshal(objectMap) +} + +// DeletedStorageListResult the deleted storage account list result +type DeletedStorageListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; A response message containing a list of the deleted storage accounts in the vault along with a link to the next page of deleted storage accounts + Value *[]DeletedStorageAccountItem `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to get the next set of deleted storage accounts. + NextLink *string `json:"nextLink,omitempty"` +} + +// DeletedStorageListResultIterator provides access to a complete listing of DeletedStorageAccountItem +// values. +type DeletedStorageListResultIterator struct { + i int + page DeletedStorageListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *DeletedStorageListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeletedStorageListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeletedStorageListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeletedStorageListResultIterator) Response() DeletedStorageListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeletedStorageListResultIterator) Value() DeletedStorageAccountItem { + if !iter.page.NotDone() { + return DeletedStorageAccountItem{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeletedStorageListResultIterator type. +func NewDeletedStorageListResultIterator(page DeletedStorageListResultPage) DeletedStorageListResultIterator { + return DeletedStorageListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dslr DeletedStorageListResult) IsEmpty() bool { + return dslr.Value == nil || len(*dslr.Value) == 0 +} + +// deletedStorageListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dslr DeletedStorageListResult) deletedStorageListResultPreparer(ctx context.Context) (*http.Request, error) { + if dslr.NextLink == nil || len(to.String(dslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dslr.NextLink))) +} + +// DeletedStorageListResultPage contains a page of DeletedStorageAccountItem values. +type DeletedStorageListResultPage struct { + fn func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error) + dslr DeletedStorageListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DeletedStorageListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.dslr) + if err != nil { + return err + } + page.dslr = next + return nil +} + +// Next advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeletedStorageListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeletedStorageListResultPage) NotDone() bool { + return !page.dslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeletedStorageListResultPage) Response() DeletedStorageListResult { + return page.dslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeletedStorageListResultPage) Values() []DeletedStorageAccountItem { + if page.dslr.IsEmpty() { + return nil + } + return *page.dslr.Value +} + +// Creates a new instance of the DeletedStorageListResultPage type. +func NewDeletedStorageListResultPage(getNextPage func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error)) DeletedStorageListResultPage { + return DeletedStorageListResultPage{fn: getNextPage} +} + // Error the key vault server error. type Error struct { // Code - READ-ONLY; The error code. @@ -1593,13 +2047,15 @@ type IssuerParameters struct { Name *string `json:"name,omitempty"` // CertificateType - Type of certificate to be requested from the issuer provider. CertificateType *string `json:"cty,omitempty"` + // CertificateTransparency - Indicates if the certificates generated under this policy should be published to certificate transparency logs. + CertificateTransparency *bool `json:"cert_transparency,omitempty"` } // JSONWebKey as of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 type JSONWebKey struct { // Kid - Key identifier. Kid *string `json:"kid,omitempty"` - // Kty - JsonWebKey key type (kty). Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' + // Kty - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' Kty JSONWebKeyType `json:"kty,omitempty"` KeyOps *[]string `json:"key_ops,omitempty"` // N - RSA modulus. (a URL-encoded base64 string) @@ -1622,7 +2078,7 @@ type JSONWebKey struct { K *string `json:"k,omitempty"` // T - HSM Token, used with 'Bring Your Own Key'. (a URL-encoded base64 string) T *string `json:"key_hsm,omitempty"` - // Crv - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'SECP256K1' + // Crv - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' Crv JSONWebKeyCurveName `json:"crv,omitempty"` // X - X component of an EC public key. (a URL-encoded base64 string) X *string `json:"x,omitempty"` @@ -1684,7 +2140,7 @@ type KeyCreateParameters struct { KeyAttributes *KeyAttributes `json:"attributes,omitempty"` // Tags - Application specific metadata in the form of key-value pairs. Tags map[string]*string `json:"tags"` - // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'SECP256K1' + // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. 
Possible values include: 'P256', 'P384', 'P521', 'P256K' Curve JSONWebKeyCurveName `json:"crv,omitempty"` } @@ -1936,12 +2392,14 @@ type KeyOperationsParameters struct { type KeyProperties struct { // Exportable - Indicates if the private key can be exported. Exportable *bool `json:"exportable,omitempty"` - // KeyType - The key type. - KeyType *string `json:"kty,omitempty"` + // KeyType - The type of key pair to be used for the certificate. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' + KeyType JSONWebKeyType `json:"kty,omitempty"` // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. KeySize *int32 `json:"key_size,omitempty"` // ReuseKey - Indicates if the same key pair will be used on certificate renewal. ReuseKey *bool `json:"reuse_key,omitempty"` + // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' + Curve JSONWebKeyCurveName `json:"crv,omitempty"` } // KeyRestoreParameters the key restore parameters. @@ -1952,7 +2410,7 @@ type KeyRestoreParameters struct { // KeySignParameters the key operations parameters. type KeySignParameters struct { - // Algorithm - The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ECDSA256' + // Algorithm - The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` // Value - a URL-encoded base64 string Value *string `json:"value,omitempty"` @@ -1984,7 +2442,7 @@ func (kup KeyUpdateParameters) MarshalJSON() ([]byte, error) { // KeyVerifyParameters the key verify parameters. type KeyVerifyParameters struct { - // Algorithm - The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ECDSA256' + // Algorithm - The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` // Digest - The digest used for signing. (a URL-encoded base64 string) Digest *string `json:"digest,omitempty"` @@ -2030,6 +2488,8 @@ type SasDefinitionAttributes struct { Created *date.UnixTime `json:"created,omitempty"` // Updated - READ-ONLY; Last updated time in UTC. Updated *date.UnixTime `json:"updated,omitempty"` + // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for SAS definitions in the current vault. If it contains 'Purgeable' the SAS definition can be permanently deleted by a privileged user; otherwise, only the system can purge the SAS definition, at the end of the retention interval. 
Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription' + RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` } // SasDefinitionBundle a SAS definition bundle consists of key vault SAS definition details plus its @@ -2040,8 +2500,12 @@ type SasDefinitionBundle struct { ID *string `json:"id,omitempty"` // SecretID - READ-ONLY; Storage account SAS definition secret id. SecretID *string `json:"sid,omitempty"` - // Parameters - READ-ONLY; The SAS definition metadata in the form of key-value pairs. - Parameters map[string]*string `json:"parameters"` + // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. + TemplateURI *string `json:"templateUri,omitempty"` + // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` // Attributes - READ-ONLY; The SAS definition attributes. Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs @@ -2056,8 +2520,12 @@ func (sdb SasDefinitionBundle) MarshalJSON() ([]byte, error) { // SasDefinitionCreateParameters the SAS definition create parameters. type SasDefinitionCreateParameters struct { - // Parameters - Sas definition creation metadata in the form of key-value pairs. - Parameters map[string]*string `json:"parameters"` + // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. + TemplateURI *string `json:"templateUri,omitempty"` + // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` // SasDefinitionAttributes - The attributes of the SAS definition. SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` // Tags - Application specific metadata in the form of key-value pairs. @@ -2067,8 +2535,14 @@ type SasDefinitionCreateParameters struct { // MarshalJSON is the custom marshaler for SasDefinitionCreateParameters. func (sdcp SasDefinitionCreateParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if sdcp.Parameters != nil { - objectMap["parameters"] = sdcp.Parameters + if sdcp.TemplateURI != nil { + objectMap["templateUri"] = sdcp.TemplateURI + } + if sdcp.SasType != "" { + objectMap["sasType"] = sdcp.SasType + } + if sdcp.ValidityPeriod != nil { + objectMap["validityPeriod"] = sdcp.ValidityPeriod } if sdcp.SasDefinitionAttributes != nil { objectMap["attributes"] = sdcp.SasDefinitionAttributes @@ -2245,8 +2719,12 @@ func NewSasDefinitionListResultPage(getNextPage func(context.Context, SasDefinit // SasDefinitionUpdateParameters the SAS definition update parameters. type SasDefinitionUpdateParameters struct { - // Parameters - Sas definition update metadata in the form of key-value pairs. 
- Parameters map[string]*string `json:"parameters"` + // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. + TemplateURI *string `json:"templateUri,omitempty"` + // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' + SasType SasTokenType `json:"sasType,omitempty"` + // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. + ValidityPeriod *string `json:"validityPeriod,omitempty"` // SasDefinitionAttributes - The attributes of the SAS definition. SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` // Tags - Application specific metadata in the form of key-value pairs. @@ -2256,8 +2734,14 @@ type SasDefinitionUpdateParameters struct { // MarshalJSON is the custom marshaler for SasDefinitionUpdateParameters. func (sdup SasDefinitionUpdateParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if sdup.Parameters != nil { - objectMap["parameters"] = sdup.Parameters + if sdup.TemplateURI != nil { + objectMap["templateUri"] = sdup.TemplateURI + } + if sdup.SasType != "" { + objectMap["sasType"] = sdup.SasType + } + if sdup.ValidityPeriod != nil { + objectMap["validityPeriod"] = sdup.ValidityPeriod } if sdup.SasDefinitionAttributes != nil { objectMap["attributes"] = sdup.SasDefinitionAttributes @@ -2577,6 +3061,8 @@ type StorageAccountAttributes struct { Created *date.UnixTime `json:"created,omitempty"` // Updated - READ-ONLY; Last updated time in UTC. Updated *date.UnixTime `json:"updated,omitempty"` + // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for storage accounts in the current vault. If it contains 'Purgeable' the storage account can be permanently deleted by a privileged user; otherwise, only the system can purge the storage account, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription' + RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` } // StorageAccountCreateParameters the storage account create parameters. @@ -2850,6 +3336,12 @@ func NewStorageListResultPage(getNextPage func(context.Context, StorageListResul return StorageListResultPage{fn: getNextPage} } +// StorageRestoreParameters the secret restore parameters. +type StorageRestoreParameters struct { + // StorageBundleBackup - The backup blob associated with a storage account. (a URL-encoded base64 string) + StorageBundleBackup *string `json:"value,omitempty"` +} + // SubjectAlternativeNames the subject alternate names of a X509 object. type SubjectAlternativeNames struct { // Emails - Email addresses. 
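The generated pager and iterator types added above (DeletedSasDefinitionListResultPage, DeletedStorageListResultIterator/Page, and so on) all follow the same NotDone/Values/NextWithContext contract. For reference, a minimal consumption sketch against the new DeletedStorageListResultPage type; the wrapper function and the way the first page is obtained are illustrative assumptions, not part of the vendored code:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
)

// printDeletedStorageAccounts walks every page of results, printing each item's ID.
// The page value is assumed to come from a keyvault list call that returns a
// DeletedStorageListResultPage.
func printDeletedStorageAccounts(ctx context.Context, page keyvault.DeletedStorageListResultPage) error {
	for page.NotDone() {
		for _, item := range page.Values() {
			if item.ID != nil {
				fmt.Println(*item.ID)
			}
		}
		// NextWithContext fetches the next page; on error the page does not advance.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}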
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go similarity index 93% rename from vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go index fb6a25c730..2db61adc28 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " keyvault/2016-10-01" + return "Azure-SDK-For-Go/" + version.Number + " keyvault/7.0" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go index c66bfbaf60..c0dc1eeeb4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go @@ -96,8 +96,8 @@ func (client ClassicAdministratorsClient) ListPreparer(ctx context.Context) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ClassicAdministratorsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/globaladministrator.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/globaladministrator.go new file mode 100644 index 0000000000..ec555ff001 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/globaladministrator.go @@ -0,0 +1,108 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// GlobalAdministratorClient is the client for the GlobalAdministrator methods of the Authorization service. +type GlobalAdministratorClient struct { + BaseClient +} + +// NewGlobalAdministratorClient creates an instance of the GlobalAdministratorClient client. +func NewGlobalAdministratorClient(subscriptionID string) GlobalAdministratorClient { + return NewGlobalAdministratorClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGlobalAdministratorClientWithBaseURI creates an instance of the GlobalAdministratorClient client. +func NewGlobalAdministratorClientWithBaseURI(baseURI string, subscriptionID string) GlobalAdministratorClient { + return GlobalAdministratorClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ElevateAccess elevates access for a Global Administrator. +func (client GlobalAdministratorClient) ElevateAccess(ctx context.Context) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GlobalAdministratorClient.ElevateAccess") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ElevateAccessPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.GlobalAdministratorClient", "ElevateAccess", nil, "Failure preparing request") + return + } + + resp, err := client.ElevateAccessSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "authorization.GlobalAdministratorClient", "ElevateAccess", resp, "Failure sending request") + return + } + + result, err = client.ElevateAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.GlobalAdministratorClient", "ElevateAccess", resp, "Failure responding to request") + } + + return +} + +// ElevateAccessPreparer prepares the ElevateAccess request. +func (client GlobalAdministratorClient) ElevateAccessPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2015-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Authorization/elevateAccess"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ElevateAccessSender sends the ElevateAccess request. The method will close the +// http.Response Body if it receives an error. +func (client GlobalAdministratorClient) ElevateAccessSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ElevateAccessResponder handles the response to the ElevateAccess request. The method always +// closes the http.Response Body. 
+func (client GlobalAdministratorClient) ElevateAccessResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go index 4fcabb91de..c99b1e4ae6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go @@ -107,8 +107,8 @@ func (client PermissionsClient) ListForResourcePreparer(ctx context.Context, res // ListForResourceSender sends the ListForResource request. The method will close the // http.Response Body if it receives an error. func (client PermissionsClient) ListForResourceSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListForResourceResponder handles the response to the ListForResource request. The method always @@ -220,8 +220,8 @@ func (client PermissionsClient) ListForResourceGroupPreparer(ctx context.Context // ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client PermissionsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go index c1555423f6..4e915dca25 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go @@ -104,8 +104,8 @@ func (client ProviderOperationsMetadataClient) GetPreparer(ctx context.Context, // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ProviderOperationsMetadataClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) 
} // GetResponder handles the response to the Get request. The method always @@ -180,8 +180,8 @@ func (client ProviderOperationsMetadataClient) ListPreparer(ctx context.Context, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ProviderOperationsMetadataClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go index 93f32d4cf6..39954b869d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go @@ -116,8 +116,8 @@ func (client RoleAssignmentsClient) CreatePreparer(ctx context.Context, scope st // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. The method always @@ -202,8 +202,8 @@ func (client RoleAssignmentsClient) CreateByIDPreparer(ctx context.Context, role // CreateByIDSender sends the CreateByID request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) CreateByIDSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // CreateByIDResponder handles the response to the CreateByID request. The method always @@ -278,8 +278,8 @@ func (client RoleAssignmentsClient) DeletePreparer(ctx context.Context, scope st // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client RoleAssignmentsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always @@ -352,8 +352,8 @@ func (client RoleAssignmentsClient) DeleteByIDPreparer(ctx context.Context, role // DeleteByIDSender sends the DeleteByID request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) DeleteByIDSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteByIDResponder handles the response to the DeleteByID request. The method always @@ -428,8 +428,8 @@ func (client RoleAssignmentsClient) GetPreparer(ctx context.Context, scope strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -502,8 +502,8 @@ func (client RoleAssignmentsClient) GetByIDPreparer(ctx context.Context, roleID // GetByIDSender sends the GetByID request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) GetByIDSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetByIDResponder handles the response to the GetByID request. The method always @@ -582,8 +582,8 @@ func (client RoleAssignmentsClient) ListPreparer(ctx context.Context, filter str // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. 
The method always @@ -709,8 +709,8 @@ func (client RoleAssignmentsClient) ListForResourcePreparer(ctx context.Context, // ListForResourceSender sends the ListForResource request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListForResourceSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListForResourceResponder handles the response to the ListForResource request. The method always @@ -828,8 +828,8 @@ func (client RoleAssignmentsClient) ListForResourceGroupPreparer(ctx context.Con // ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) } // ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always @@ -946,8 +946,8 @@ func (client RoleAssignmentsClient) ListForScopePreparer(ctx context.Context, sc // ListForScopeSender sends the ListForScope request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListForScopeSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListForScopeResponder handles the response to the ListForScope request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go index 8156456638..4c42845fdf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go @@ -105,8 +105,8 @@ func (client RoleDefinitionsClient) CreateOrUpdatePreparer(ctx context.Context, // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) 
} // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -181,8 +181,8 @@ func (client RoleDefinitionsClient) DeletePreparer(ctx context.Context, scope st // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always @@ -257,8 +257,8 @@ func (client RoleDefinitionsClient) GetPreparer(ctx context.Context, scope strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. The method always @@ -334,8 +334,8 @@ func (client RoleDefinitionsClient) GetByIDPreparer(ctx context.Context, roleID // GetByIDSender sends the GetByID request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) GetByIDSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // GetByIDResponder handles the response to the GetByID request. The method always @@ -414,8 +414,8 @@ func (client RoleDefinitionsClient) ListPreparer(ctx context.Context, scope stri // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go index 31894dbfc2..62e461a559 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go @@ -107,7 +107,7 @@ func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonic if options.UseHTTPS { protocols = "https" } - stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers) + stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, signedResource, "", headers) if err != nil { return "", err } @@ -149,7 +149,7 @@ func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonic return sasURL.String(), nil } -func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) { +func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime string, headers OverrideHeaders) (string, error) { rscc := headers.CacheControl rscd := headers.ContentDisposition rsce := headers.ContentEncoding @@ -160,6 +160,11 @@ func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonical canonicalizedResource = "/blob" + canonicalizedResource } + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + if signedVersion >= "2018-11-09" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime, rscc, rscd, rsce, rscl, rsct), nil + } + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 if signedVersion >= "2015-04-05" { return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go index c9c62d799a..bd19eccc41 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -197,6 +197,47 @@ func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, o return b.respondCreation(resp, BlobTypeBlock) } +// PutBlockFromURLOptions includes the options for a put block from URL operation +type PutBlockFromURLOptions struct { + PutBlockOptions + + SourceContentMD5 string `header:"x-ms-source-content-md5"` + SourceContentCRC64 string `header:"x-ms-source-content-crc64"` +} + +// PutBlockFromURL copy data of exactly specified size from specified URL to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes from a remote URL and the offset and length is known in advance. +// +// The API rejects requests with size > 100 MiB (but this limit is not +// checked by the SDK). 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url +func (b *Blob) PutBlockFromURL(blockID string, blobURL string, offset int64, size uint64, options *PutBlockFromURLOptions) error { + query := url.Values{ + "comp": {"block"}, + "blockid": {blockID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + // The value of this header must be set to zero. + // When the length is not zero, the operation will fail with the status code 400 (Bad Request). + headers["Content-Length"] = "0" + headers["x-ms-copy-source"] = blobURL + headers["x-ms-source-range"] = fmt.Sprintf("bytes=%d-%d", offset, uint64(offset)+size-1) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeBlock) +} + // PutBlockListOptions includes the options for a put block list operation type PutBlockListOptions struct { Timeout uint diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 427558b5d6..99702effe1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -46,7 +46,7 @@ const ( // DefaultAPIVersion is the Azure Storage API version string used when a // basic client is created. - DefaultAPIVersion = "2016-05-31" + DefaultAPIVersion = "2018-03-28" defaultUseHTTPS = true defaultRetryAttempts = 5 @@ -367,11 +367,14 @@ func newSASClient(accountName, baseURL string, sasToken url.Values) Client { accountName: accountName, baseURL: baseURL, accountSASToken: sasToken, + useHTTPS: defaultUseHTTPS, } c.userAgent = c.getDefaultUserAgent() // Get API version and protocol from token c.apiVersion = sasToken.Get("sv") - c.useHTTPS = sasToken.Get("spr") == "https" + if spr := sasToken.Get("spr"); spr != "" { + c.useHTTPS = spr == "https" + } return c } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index 06bbe4ba08..6a480b12a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -29,7 +29,11 @@ const fourMB = uint64(4194304) const oneTB = uint64(1099511627776) // Export maximum range and file sizes + +// MaxRangeSize defines the maximum size in bytes for a file range. const MaxRangeSize = fourMB + +// MaxFileSize defines the maximum size in bytes for a file. const MaxFileSize = oneTB // File represents a file on a share. 
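The new PutBlockFromURL method above stages a block on a block blob directly from a remote URL instead of uploading the bytes through the client. A minimal sketch of calling it; the helper function, block ID, and source URL are illustrative, and the source blob is assumed to be readable by the service (public or carrying a SAS token):

package example

import (
	"encoding/base64"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// stageBlockFromURL stages the first 4 MiB of srcURL as an uncommitted block on dst.
// Block IDs are conventionally base64-encoded strings; per the doc comment above,
// the service rejects single blocks larger than 100 MiB.
func stageBlockFromURL(dst *storage.Blob, srcURL string) error {
	blockID := base64.StdEncoding.EncodeToString([]byte("block-00000"))
	return dst.PutBlockFromURL(blockID, srcURL, 0, 4*1024*1024, nil)
}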
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go index c338975ab5..dc41992227 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -22,10 +22,12 @@ import ( // ServiceProperties represents the storage account service properties type ServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + DeleteRetentionPolicy *RetentionPolicy // blob storage only + StaticWebsite *StaticWebsite // blob storage only } // Logging represents the Azure Analytics Logging settings @@ -65,6 +67,16 @@ type CorsRule struct { AllowedHeaders string } +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // Enabled - Indicates whether this account is hosting a static website + Enabled bool + // IndexDocument - The default name of the index page under each directory + IndexDocument *string + // ErrorDocument404Path - The absolute path of the custom 404 page + ErrorDocument404Path *string +} + func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { query := url.Values{ "restype": {"service"}, @@ -102,10 +114,12 @@ func (c Client) setServiceProperties(props ServiceProperties, service string, au // Ideally, StorageServiceProperties would be the output struct // This is to avoid golint stuttering, while generating the correct XML type StorageServiceProperties struct { - Logging *Logging - HourMetrics *Metrics - MinuteMetrics *Metrics - Cors *Cors + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + DeleteRetentionPolicy *RetentionPolicy + StaticWebsite *StaticWebsite } input := StorageServiceProperties{ Logging: props.Logging, @@ -113,6 +127,11 @@ func (c Client) setServiceProperties(props ServiceProperties, service string, au MinuteMetrics: props.MinuteMetrics, Cors: props.Cors, } + // only set these fields for blob storage else it's invalid XML + if service == blobServiceName { + input.DeleteRetentionPolicy = props.DeleteRetentionPolicy + input.StaticWebsite = props.StaticWebsite + } body, length, err := xmlMarshal(input) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 22d9b4f5c1..0febf077f6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -355,8 +355,12 @@ func (t *Table) queryEntities(uri string, headers map[string]string, ml Metadata return nil, err } v := originalURI.Query() - v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) - v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) + if contToken.NextPartitionKey != "" { + v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) + } + if contToken.NextRowKey != "" { + v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) + } newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) entities.NextLink = &newURI entities.ml = ml @@ -371,7 +375,7 @@ func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { NextRowKey: h.Get(headerNextRowKey), } - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { + if ct.NextPartitionKey != "" || ct.NextRowKey != "" { return 
&ct } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index 2f58cd8cfd..073281bb85 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. -const Number = "v29.0.0" +const Number = "v36.2.0" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE similarity index 100% rename from vendor/github.com/Azure/go-autorest/LICENSE rename to vendor/github.com/Azure/go-autorest/autorest/LICENSE diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE similarity index 94% rename from vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE rename to vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE index 261eeb9e9f..b9d6a27ea9 100644 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -175,18 +176,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2015 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
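The DeleteRetentionPolicy and StaticWebsite fields added to ServiceProperties above are only sent for the blob service, as the setServiceProperties change notes. A minimal sketch of enabling static-website hosting via the new StaticWebsite field; the credentials are placeholders, and the NewBasicClient/GetBlobService/SetServiceProperties calls are the existing service-properties helpers assumed to be unchanged by this diff:

package example

import "github.com/Azure/azure-sdk-for-go/storage"

// enableStaticWebsite turns on static-website hosting for the account's blob service.
func enableStaticWebsite(accountName, accountKey string) error {
	client, err := storage.NewBasicClient(accountName, accountKey)
	if err != nil {
		return err
	}
	index := "index.html"
	notFound := "404.html"
	props := storage.ServiceProperties{
		StaticWebsite: &storage.StaticWebsite{
			Enabled:              true,
			IndexDocument:        &index,
			ErrorDocument404Path: &notFound,
		},
	}
	blobService := client.GetBlobService()
	// Only the blob service accepts these fields; other services reject the extra XML.
	return blobService.SetServiceProperties(props)
}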
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index 7b0c4bc4d2..fec416a9c4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -135,7 +135,7 @@ resource := "https://management.core.windows.net/" applicationSecret := "APPLICATION_SECRET" spt, err := adal.NewServicePrincipalToken( - oauthConfig, + *oauthConfig, appliationID, applicationSecret, resource, @@ -170,7 +170,7 @@ if err != nil { } spt, err := adal.NewServicePrincipalTokenFromCertificate( - oauthConfig, + *oauthConfig, applicationID, certificate, rsaPrivateKey, @@ -195,7 +195,7 @@ oauthClient := &http.Client{} // Acquire the device code deviceCode, err := adal.InitiateDeviceAuth( oauthClient, - oauthConfig, + *oauthConfig, applicationID, resource) if err != nil { @@ -212,7 +212,7 @@ if err != nil { } spt, err := adal.NewServicePrincipalTokenFromManualToken( - oauthConfig, + *oauthConfig, applicationID, resource, *token, @@ -227,7 +227,7 @@ if (err == nil) { ```Go spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - oauthConfig, + *oauthConfig, applicationID, username, password, @@ -243,11 +243,11 @@ if (err == nil) { ``` Go spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - oauthConfig, + *oauthConfig, applicationID, clientSecret, - authorizationCode, - redirectURI, + authorizationCode, + redirectURI, resource, callbacks...) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index 8c83a917ff..fa5964742f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -15,10 +15,15 @@ package adal // limitations under the License. import ( + "errors" "fmt" "net/url" ) +const ( + activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" +) + // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { @@ -60,7 +65,6 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV } api = fmt.Sprintf("?api-version=%s", *apiVersion) } - const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -89,3 +93,59 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV DeviceCodeEndpoint: *deviceCodeURL, }, nil } + +// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs. +type MultiTenantOAuthConfig interface { + PrimaryTenant() *OAuthConfig + AuxiliaryTenants() []*OAuthConfig +} + +// OAuthOptions contains optional OAuthConfig creation arguments. +type OAuthOptions struct { + APIVersion string +} + +func (c OAuthOptions) apiVersion() string { + if c.APIVersion != "" { + return fmt.Sprintf("?api-version=%s", c.APIVersion) + } + return "1.0" +} + +// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information. 
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 0000000000..fdc5b90ca5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/Azure/go-autorest/autorest/mocks v0.3.0 + github.com/Azure/go-autorest/tracing v0.5.0 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 0000000000..f0a018563b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,23 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger 
v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 0000000000..28a4bfc4c4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go index 834401e00d..d7e4372bbc 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -15,7 +15,12 @@ package adal // limitations under the License. import ( + "crypto/tls" "net/http" + "net/http/cookiejar" + "sync" + + "github.com/Azure/go-autorest/tracing" ) const ( @@ -23,6 +28,9 @@ const ( mimeTypeFormPost = "application/x-www-form-urlencoded" ) +var defaultSender Sender +var defaultSenderInit = &sync.Once{} + // Sender is the interface that wraps the Do method to send HTTP requests. // // The standard http.Client conforms to this interface. @@ -45,7 +53,7 @@ type SendDecorator func(Sender) Sender // CreateSender creates, decorates, and returns, as a Sender, the default http.Client. func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) + return DecorateSender(sender(), decorators...) 
} // DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to @@ -58,3 +66,30 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender { } return s } + +func sender() Sender { + // note that we can't init defaultSender in init() since it will + // execute before calling code has had a chance to enable tracing + defaultSenderInit.Do(func() { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + defaultSender = &http.Client{Jar: j, Transport: roundTripper} + }) + return defaultSender +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index effa87ab2f..7c7fca3718 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -26,15 +26,14 @@ import ( "fmt" "io/ioutil" "math" - "net" "net/http" "net/url" + "os" "strings" "sync" "time" "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/tracing" "github.com/dgrijalva/jwt-go" ) @@ -64,6 +63,12 @@ const ( // the default number of attempts to refresh an MSI authentication token defaultMaxMSIRefreshAttempts = 5 + + // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions + asMSIEndpointEnv = "MSI_ENDPOINT" + + // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions + asMSISecretEnv = "MSI_SECRET" ) // OAuthTokenProvider is an interface which should be implemented by an access token retriever @@ -71,6 +76,12 @@ type OAuthTokenProvider interface { OAuthToken() string } +// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. +type MultitenantOAuthTokenProvider interface { + PrimaryOAuthToken() string + AuxiliaryOAuthTokens() []string +} + // TokenRefreshError is an interface used by errors returned during token refresh. 
type TokenRefreshError interface { error @@ -390,7 +401,7 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { spt.refreshLock = &sync.RWMutex{} } if spt.sender == nil { - spt.sender = &http.Client{Transport: tracing.Transport} + spt.sender = sender() } return nil } @@ -438,7 +449,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso RefreshWithin: defaultRefresh, }, refreshLock: &sync.RWMutex{}, - sender: &http.Client{Transport: tracing.Transport}, + sender: sender(), refreshCallbacks: callbacks, } return spt, nil @@ -629,6 +640,31 @@ func GetMSIVMEndpoint() (string, error) { return msiEndpoint, nil } +func isAppService() bool { + _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv) + + return asMSIEndpointEnvExists && asMSISecretEnvExists +} + +// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions +func GetMSIAppServiceEndpoint() (string, error) { + asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + + if asMSIEndpointEnvExists { + return asMSIEndpoint, nil + } + return "", errors.New("MSI endpoint not found") +} + +// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment +func GetMSIEndpoint() (string, error) { + if isAppService() { + return GetMSIAppServiceEndpoint() + } + return GetMSIVMEndpoint() +} + // NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. // It will use the system assigned identity when creating the token. func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { @@ -661,7 +697,12 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI v := url.Values{} v.Set("resource", resource) - v.Set("api-version", "2018-02-01") + // App Service MSI currently only supports token API version 2017-09-01 + if isAppService() { + v.Set("api-version", "2017-09-01") + } else { + v.Set("api-version", "2018-02-01") + } if userAssignedID != nil { v.Set("client_id", *userAssignedID) } @@ -679,7 +720,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI RefreshWithin: defaultRefresh, }, refreshLock: &sync.RWMutex{}, - sender: &http.Client{Transport: tracing.Transport}, + sender: sender(), refreshCallbacks: callbacks, MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, } @@ -788,7 +829,7 @@ func isIMDS(u url.URL) bool { if err != nil { return false } - return u.Host == imds.Host && u.Path == imds.Path + return (u.Host == imds.Host && u.Path == imds.Path) || isAppService() } func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { @@ -797,6 +838,11 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) } req.Header.Add("User-Agent", UserAgent()) + // Add header when runtime is on App Service or Functions + if isAppService() { + asMSISecret, _ := os.LookupEnv(asMSISecretEnv) + req.Header.Add("Secret", asMSISecret) + } req = req.WithContext(ctx) if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { v := url.Values{} @@ -841,7 +887,8 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource resp, err = spt.sender.Do(req) } if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. 
Error = '%v'", err), nil) + // don't return a TokenRefreshError here; this will allow retry logic to apply + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) } defer resp.Body.Close() @@ -908,10 +955,8 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http for attempt < maxAttempts { resp, err = sender.Do(req) - // retry on temporary network errors, e.g. transient network failures. - // if we don't receive a response then assume we can't connect to the - // endpoint so we're likely not running on an Azure VM so don't retry. - if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { + // we want to retry if err is not nil or the status code is in the list of retry codes + if err == nil && !responseHasStatusCode(resp, retries...) { return } @@ -935,20 +980,12 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http return } -// returns true if the specified error is a temporary network error or false if it's not. -// if the error doesn't implement the net.Error interface the return value is true. -func isTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} - -// returns true if slice ints contains the value n -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } } } return false @@ -983,3 +1020,93 @@ func (spt *ServicePrincipalToken) Token() Token { defer spt.refreshLock.RUnlock() return spt.inner.Token } + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. +func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. +func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. 
+func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. +func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 2e24b4b397..54e87b5b64 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -15,6 +15,7 @@ package autorest // limitations under the License. import ( + "crypto/tls" "encoding/base64" "fmt" "net/http" @@ -22,7 +23,6 @@ import ( "strings" "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/tracing" ) const ( @@ -149,11 +149,11 @@ type BearerAuthorizerCallback struct { // NewBearerAuthorizerCallback creates a bearer authorization callback. The callback // is invoked when the HTTP request is submitted. 
-func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if sender == nil { - sender = &http.Client{Transport: tracing.Transport} +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) } - return &BearerAuthorizerCallback{sender: sender, callback: callback} + return &BearerAuthorizerCallback{sender: s, callback: callback} } // WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value @@ -285,3 +285,52 @@ func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() } + +// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. +type MultiTenantServicePrincipalTokenAuthorizer interface { + WithAuthorization() PrepareDecorator +} + +// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider +func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { + return &multiTenantSPTAuthorizer{tp: tp} +} + +type multiTenantSPTAuthorizer struct { + tp adal.MultitenantOAuthTokenProvider +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the +// primary token along with the auxiliary authorization header using the auxiliary tokens. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, + "Failed to refresh one or more Tokens for request to %s", r.URL) + } + } + r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) + if err != nil { + return r, err + } + auxTokens := mt.tp.AuxiliaryOAuthTokens() + for i := range auxTokens { + auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) + } + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 0041eacf75..1cb41cbeb1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -45,14 +45,7 @@ var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.Stat // Future provides a mechanism to access the status and results of an asynchronous request. // Since futures are stateful they should be passed by value to avoid race conditions. type Future struct { - req *http.Request // legacy - pt pollingTracker -} - -// NewFuture returns a new Future object initialized with the specified request. -// Deprecated: Please use NewFutureFromResponse instead. 
-func NewFuture(req *http.Request) Future { - return Future{req: req} + pt pollingTracker } // NewFutureFromResponse returns a new Future object initialized @@ -86,12 +79,6 @@ func (f Future) PollingMethod() PollingMethodType { return f.pt.pollingMethod() } -// Done queries the service to see if the operation has completed. -// Deprecated: Use DoneWithContext() -func (f *Future) Done(sender autorest.Sender) (bool, error) { - return f.DoneWithContext(context.Background(), sender) -} - // DoneWithContext queries the service to see if the operation has completed. func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") @@ -104,20 +91,6 @@ func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (d tracing.EndSpan(ctx, sc, err) }() - // support for legacy Future implementation - if f.req != nil { - resp, err := sender.Do(f.req) - if err != nil { - return false, err - } - pt, err := createPollingTracker(resp) - if err != nil { - return false, err - } - f.pt = pt - f.req = nil - } - // end legacy if f.pt == nil { return false, autorest.NewError("Future", "Done", "future is not initialized") } @@ -168,15 +141,6 @@ func (f Future) GetPollingDelay() (time.Duration, bool) { return d, true } -// WaitForCompletion will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// Deprecated: Please use WaitForCompletionRef() instead. -func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { - return f.WaitForCompletionRef(ctx, client) -} - // WaitForCompletionRef will return when one of the following conditions is met: the long // running operation has completed, the provided context is cancelled, or the client's // polling duration has been exceeded. It will retry failed polling attempts based on @@ -453,6 +417,11 @@ func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest } req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } pt.resp, err = sender.Do(req) if err != nil { return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") @@ -919,43 +888,6 @@ func isValidURL(s string) bool { return err == nil && u.IsAbs() } -// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure -// long-running operation. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via -// the context associated with the http.Request. -// Deprecated: Prefer using Futures to allow for non-blocking async operations. -func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - return resp, err - } - if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) 
{ - return resp, nil - } - future, err := NewFutureFromResponse(resp) - if err != nil { - return resp, err - } - // retry until either the LRO completes or we receive an error - var done bool - for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { - // check for Retry-After delay, if not present use the specified polling delay - if pd, ok := future.GetPollingDelay(); ok { - delay = pd - } - // wait until the delay elapses or the context is cancelled - if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { - return future.Response(), - autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") - } - } - return future.Response(), err - }) - } -} - // PollingMethodType defines a type used for enumerating polling mechanisms. type PollingMethodType string diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE similarity index 94% rename from vendor/github.com/census-instrumentation/opencensus-proto/LICENSE rename to vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE index d645695673..b9d6a27ea9 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE @@ -176,18 +176,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2015 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
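A minimal sketch of how the multi-tenant pieces vendored above (adal.NewMultiTenantOAuthConfig, adal.NewMultiTenantServicePrincipalToken, autorest.NewMultiTenantServicePrincipalTokenAuthorizer) are expected to compose. The endpoint, tenant IDs, and credentials below are placeholders for illustration only; they are not values from this change.

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

func buildMultiTenantAuthorizer() (autorest.Authorizer, error) {
	// Placeholder values; real callers would read these from configuration.
	const (
		aadEndpoint = "https://login.microsoftonline.com/"
		primary     = "primary-tenant-id"
		clientID    = "client-id"
		secret      = "client-secret"
		resource    = "https://management.azure.com/"
	)
	aux := []string{"aux-tenant-1", "aux-tenant-2"}

	// One OAuthConfig per tenant: the primary tenant first, then one to three auxiliaries.
	cfg, err := adal.NewMultiTenantOAuthConfig(aadEndpoint, primary, aux, adal.OAuthOptions{})
	if err != nil {
		return nil, fmt.Errorf("building multi-tenant OAuth config: %v", err)
	}

	// One ServicePrincipalToken per tenant, refreshed together via EnsureFreshWithContext.
	mtSPT, err := adal.NewMultiTenantServicePrincipalToken(cfg, clientID, secret, resource)
	if err != nil {
		return nil, fmt.Errorf("building multi-tenant SPT: %v", err)
	}

	// The resulting PrepareDecorator sets the primary token in the Authorization header
	// and the auxiliary tokens in the auxiliary authorization header on each request.
	return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil
}
```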
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index 20855d4ab3..5f02026b39 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -40,6 +40,7 @@ import ( const ( SubscriptionID = "AZURE_SUBSCRIPTION_ID" TenantID = "AZURE_TENANT_ID" + AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS" ClientID = "AZURE_CLIENT_ID" ClientSecret = "AZURE_CLIENT_SECRET" CertificatePath = "AZURE_CERTIFICATE_PATH" @@ -96,6 +97,7 @@ func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { } s.setValue(SubscriptionID) s.setValue(TenantID) + s.setValue(AuxiliaryTenantIDs) s.setValue(ClientID) s.setValue(ClientSecret) s.setValue(CertificatePath) @@ -145,6 +147,12 @@ func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsCon config := NewClientCredentialsConfig(clientID, secret, tenantID) config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint config.Resource = settings.Values[Resource] + if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok { + config.AuxTenants = strings.Split(auxTenants, ";") + for i := range config.AuxTenants { + config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i]) + } + } return config, nil } @@ -546,6 +554,7 @@ type ClientCredentialsConfig struct { ClientID string ClientSecret string TenantID string + AuxTenants []string AADEndpoint string Resource string } @@ -559,13 +568,29 @@ func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincip return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) } +// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) { + oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{}) + if err != nil { + return nil, err + } + return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + // Authorizer gets the authorizer from client credentials. func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { - spToken, err := ccc.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err) + if len(ccc.AuxTenants) == 0 { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil } - return autorest.NewBearerAuthorizer(spToken), nil + mtSPT, err := ccc.MultiTenantServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err) + } + return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil } // ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. @@ -690,7 +715,7 @@ type MSIConfig struct { // Authorizer gets the authorizer from MSI. 
func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { - msiEndpoint, err := adal.GetMSIVMEndpoint() + msiEndpoint, err := adal.GetMSIEndpoint() if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod new file mode 100644 index 0000000000..43fa1b7920 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/azure/auth + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.2 + github.com/Azure/go-autorest/autorest/adal v0.7.0 + github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 + github.com/dimchansky/utfbom v1.1.0 + golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum new file mode 100644 index 0000000000..c462a7d4d9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum @@ -0,0 +1,38 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= 
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go new file mode 100644 index 0000000000..2f09cd177a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
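The auth package changes earlier in this diff route client-credential authorizers through that multi-tenant path whenever auxiliary tenants are configured, either via the AZURE_AUXILIARY_TENANT_IDS environment variable or directly on ClientCredentialsConfig.AuxTenants. A rough sketch under assumed placeholder credentials:

```go
package main

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func authorizerFromClientCredentials() (autorest.Authorizer, error) {
	// Placeholder credentials; real values come from configuration or environment variables.
	config := auth.NewClientCredentialsConfig("client-id", "client-secret", "primary-tenant-id")
	config.Resource = "https://management.azure.com/"

	// With AuxTenants empty, Authorizer() returns a plain bearer authorizer; with one to
	// three auxiliary tenant IDs it builds a multi-tenant service principal token instead.
	config.AuxTenants = []string{"aux-tenant-id"}

	return config.Authorizer()
}
```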
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod new file mode 100644 index 0000000000..03ad580d61 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/azure/cli + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest/autorest/adal v0.6.0 + github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/dimchansky/utfbom v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum new file mode 100644 index 0000000000..7a8b1f23f6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum @@ -0,0 +1,29 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go new file mode 100644 index 0000000000..618bed392f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 85d3202afe..6c20b8179a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -22,9 +22,14 @@ import ( "strings" ) -// EnvironmentFilepathName captures the name of the environment variable containing the path to the file -// to be used while populating the Azure Environment. -const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, @@ -33,29 +38,40 @@ var environments = map[string]Environment{ "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, } +// ResourceIdentifier contains a set of Azure resource IDs. +type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` +} + // Environment represents a set of endpoints for each of Azure's Clouds. 
type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` - TokenAudience string `json:"tokenAudience"` + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` } var ( @@ -82,6 +98,14 @@ var ( ContainerRegistryDNSSuffix: "azurecr.io", CosmosDBDNSSuffix: "documents.azure.com", TokenAudience: "https://management.azure.com/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + }, } // USGovernmentCloud is the cloud environment for the US Government @@ -107,6 +131,14 @@ var ( ContainerRegistryDNSSuffix: "azurecr.us", CosmosDBDNSSuffix: "documents.azure.us", TokenAudience: "https://management.usgovcloudapi.net/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + 
Storage: "https://storage.azure.com/", + }, } // ChinaCloud is the cloud environment operated in China @@ -132,6 +164,14 @@ var ( ContainerRegistryDNSSuffix: "azurecr.cn", CosmosDBDNSSuffix: "documents.azure.cn", TokenAudience: "https://management.chinacloudapi.cn/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, } // GermanCloud is the cloud environment operated in Germany @@ -154,9 +194,17 @@ var ( ServiceBusEndpointSuffix: "servicebus.cloudapi.de", ServiceManagementVMDNSSuffix: "azurecloudapp.de", ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - // ContainerRegistryDNSSuffix: "", ACR not present yet in the German Cloud - CosmosDBDNSSuffix: "documents.microsoftazure.de", - TokenAudience: "https://management.microsoftazure.de/", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, } ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index 9520001fc5..1c6a0617a1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -22,12 +22,10 @@ import ( "io/ioutil" "log" "net/http" - "net/http/cookiejar" "strings" "time" "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/tracing" ) const ( @@ -73,6 +71,22 @@ type Response struct { *http.Response `json:"-"` } +// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. +// If there was no response (i.e. the underlying http.Response is nil) the return value is false. +func (r Response) IsHTTPStatus(statusCode int) bool { + if r.Response == nil { + return false + } + return r.Response.StatusCode == statusCode +} + +// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. +// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided +// the return value is false. +func (r Response) HasHTTPStatus(statusCodes ...int) bool { + return ResponseHasStatusCode(r.Response, statusCodes...) +} + // LoggingInspector implements request and response inspectors that log the full request and // response to a supplied log. type LoggingInspector struct { @@ -170,6 +184,24 @@ type Client struct { // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed // string. func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. 
+func NewClientWithOptions(options ClientOptions) Client { + return newClient(options.UserAgent, options.Renegotiation) +} + +func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { c := Client{ PollingDelay: DefaultPollingDelay, PollingDuration: DefaultPollingDuration, @@ -177,7 +209,7 @@ func NewClientWithUserAgent(ua string) Client { RetryDuration: DefaultRetryDuration, UserAgent: UserAgent(), } - c.Sender = c.sender() + c.Sender = c.sender(renegotiation) c.AddToUserAgent(ua) return c } @@ -221,34 +253,17 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { return true, v }, }) - resp, err := SendWithSender(c.sender(), r) + resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) logger.Instance.WriteResponse(resp, logger.Filter{}) Respond(resp, c.ByInspecting()) return resp, err } // sender returns the Sender to which to send requests. -func (c Client) sender() Sender { +func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { if c.Sender == nil { - // Use behaviour compatible with DefaultTransport, but require TLS minimum version. - var defaultTransport = http.DefaultTransport.(*http.Transport) - - tracing.Transport.Base = &http.Transport{ - Proxy: defaultTransport.Proxy, - DialContext: defaultTransport.DialContext, - MaxIdleConns: defaultTransport.MaxIdleConns, - IdleConnTimeout: defaultTransport.IdleConnTimeout, - TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, - ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - }, - } - - j, _ := cookiejar.New(nil) - return &http.Client{Jar: j, Transport: tracing.Transport} + return sender(renengotiation) } - return c.Sender } diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
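
Editor's note (not part of the upstream diff): the client.go hunk earlier in this change adds a ClientOptions struct, NewClientWithOptions, and the nil-safe Response.IsHTTPStatus/HasHTTPStatus helpers. The sketch below is only an illustration of how those new APIs might be called; the user-agent string and URL are placeholders, not values taken from this change.

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Build a client that permits one client-side TLS renegotiation,
	// using the ClientOptions type added in the client.go hunk above.
	c := autorest.NewClientWithOptions(autorest.ClientOptions{
		UserAgent:     "example-user-agent", // placeholder UA suffix
		Renegotiation: tls.RenegotiateOnceAsClient,
	})

	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid/health", nil)
	resp, err := c.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	// HasHTTPStatus returns false when there was no response or when no
	// status codes are supplied, so it is safe to call without nil checks.
	ok := autorest.Response{Response: resp}.HasHTTPStatus(http.StatusOK, http.StatusNoContent)
	fmt.Println("got an expected 2xx status:", ok)
}

The default remains tls.RenegotiateNever (as NewClientWithUserAgent now delegates to the same newClient helper), so callers only need ClientOptions when a service endpoint actually requires renegotiation.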
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 0000000000..3adc4804c3 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 + +require github.com/Azure/go-autorest/autorest v0.9.0 diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum new file mode 100644 index 0000000000..9e2ee7a948 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -0,0 +1,16 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 0000000000..55adf930f4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 0000000000..ab2ae66ace --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/adal v0.5.0 + github.com/Azure/go-autorest/autorest/mocks v0.2.0 + github.com/Azure/go-autorest/logger v0.1.0 + github.com/Azure/go-autorest/tracing v0.5.0 + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 0000000000..729b99cd09 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,18 @@ +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index 6d67bd7337..6e8ed64eba 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -16,7 +16,9 @@ package autorest import ( "bytes" + "context" "encoding/json" + "encoding/xml" "fmt" "io" "io/ioutil" @@ -31,11 +33,33 @@ const ( mimeTypeOctetStream = "application/octet-stream" mimeTypeFormPost = "application/x-www-form-urlencoded" - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" + headerAuthorization = "Authorization" + headerAuxAuthorization 
= "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" ) +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. +func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. +func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { + inCtx := ctx.Value(ctxPrepareDecorators{}) + if pd, ok := inCtx.([]PrepareDecorator); ok { + return pd + } + return defaultPrepareDecorators +} + // Preparer is the interface that wraps the Prepare method. // // Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations @@ -190,6 +214,9 @@ func AsGet() PrepareDecorator { return WithMethod("GET") } // AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. func AsHead() PrepareDecorator { return WithMethod("HEAD") } +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + // AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } @@ -225,6 +252,25 @@ func WithBaseURL(baseURL string) PrepareDecorator { } } +// WithBytes returns a PrepareDecorator that takes a list of bytes +// which passes the bytes directly to the body +func WithBytes(input *[]byte) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if input == nil { + return r, fmt.Errorf("Input Bytes was nil") + } + + r.ContentLength = int64(len(*input)) + r.Body = ioutil.NopCloser(bytes.NewReader(*input)) + } + return r, err + }) + } +} + // WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the // request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { @@ -377,6 +423,28 @@ func WithJSON(v interface{}) PrepareDecorator { } } +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + // WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path // is absolute (that is, it begins with a "/"), it replaces the existing path. 
func WithPath(path string) PrepareDecorator { @@ -455,7 +523,7 @@ func parseURL(u *url.URL, path string) (*url.URL, error) { // WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters // given in the supplied map (i.e., key=value). func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(queryParameters) + parameters := MapToValues(queryParameters) return func(p Preparer) Preparer { return PreparerFunc(func(r *http.Request) (*http.Request, error) { r, err := p.Prepare(r) @@ -463,14 +531,16 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato if r.URL == nil { return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") } - v := r.URL.Query() for key, value := range parameters { - d, err := url.QueryUnescape(value) - if err != nil { - return r, err + for i := range value { + d, err := url.QueryUnescape(value[i]) + if err != nil { + return r, err + } + value[i] = d } - v.Add(key, d) + v[key] = value } r.URL.RawQuery = v.Encode() } diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index a908a0adb7..349e1963a2 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -153,6 +153,25 @@ func ByClosingIfError() RespondDecorator { } } +// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingBytes(v *[]byte) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + bytes, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + *v = bytes + } + } + return err + }) + } +} + // ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the // response Body into the value pointed to by v. func ByUnmarshallingJSON(v interface{}) RespondDecorator { diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index 6665d7c006..5e595d7b1a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -15,16 +15,40 @@ package autorest // limitations under the License. import ( + "context" + "crypto/tls" "fmt" "log" "math" "net/http" + "net/http/cookiejar" "strconv" "time" "github.com/Azure/go-autorest/tracing" ) +// used as a key type in context.WithValue() +type ctxSendDecorators struct{} + +// WithSendDecorators adds the specified SendDecorators to the provided context. +// If no SendDecorators are provided the context is unchanged. +func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { + if len(sendDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) +} + +// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. 
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { + inCtx := ctx.Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + return sd + } + return defaultSendDecorators +} + // Sender is the interface that wraps the Do method to send HTTP requests. // // The standard http.Client conforms to this interface. @@ -47,7 +71,7 @@ type SendDecorator func(Sender) Sender // CreateSender creates, decorates, and returns, as a Sender, the default http.Client. func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) + return DecorateSender(sender(tls.RenegotiateNever), decorators...) } // DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to @@ -70,7 +94,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender { // // Send will not poll or retry requests. func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...) + return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) } // SendWithSender sends the passed http.Request, through the provided Sender, returning the @@ -82,6 +106,29 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht return DecorateSender(s, decorators...).Do(r) } +func sender(renengotiation tls.RenegotiationSupport) Sender { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Renegotiation: renengotiation, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j, Transport: roundTripper} +} + // AfterDelay returns a SendDecorator that delays for the passed time.Duration before // invoking the Sender. The delay may be terminated by closing the optional channel on the // http.Request. If canceled, no further Senders are invoked. @@ -211,53 +258,73 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { // DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified // number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. +// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. +// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. 
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt := 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - // if the error isn't temporary don't bother retrying - if err != nil && !IsTemporaryNetworkError(err) { - return nil, err - } - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // don't count a 429 against the number of attempts - // so that we continue to retry until it succeeds - if resp == nil || resp.StatusCode != http.StatusTooManyRequests { - attempt++ - } - } - return resp, err + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) }) } } -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in -// responses with status code 429 +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt := 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + resp, err = s.Do(rr.Request()) + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) 
|| IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) { + return resp, r.Context().Err() + } + // when count429 == false don't count a 429 against the number + // of attempts so that we continue to retry until it succeeds + if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { + attempt++ + } + } + return resp, err +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. +// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. +// The function returns true after successfully waiting for the specified duration. If there is +// no Retry-After header or the wait is cancelled the return value is false. func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { if resp == nil { return false } - retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) - if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + var dur time.Duration + ra := resp.Header.Get("Retry-After") + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + dur = time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + dur = t.Sub(time.Now()) + } + if dur > 0 { select { - case <-time.After(time.Duration(retryAfter) * time.Second): + case <-time.After(dur): return true case <-cancel: return false @@ -317,8 +384,22 @@ func WithLogging(logger *log.Logger) SendDecorator { // Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt // count. func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + return DelayForBackoffWithCap(backoff, 0, attempt, cancel) +} + +// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. +// The delay may be canceled by closing the passed channel. If terminated early, returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } select { - case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + case <-time.After(d): return true case <-cancel: return false diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go index fdda2ce1aa..86694bd255 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go +++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -145,3 +145,8 @@ func Float64(i *float64) float64 { func Float64Ptr(i float64) *float64 { return &i } + +// ByteSlicePtr returns a pointer to the passed byte slice. 
+func ByteSlicePtr(b []byte) *[]byte { + return &b +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod new file mode 100644 index 0000000000..48fd8c6e57 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/to + +go 1.12 + +require github.com/Azure/go-autorest/autorest v0.9.0 diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.sum b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum new file mode 100644 index 0000000000..d7ee6b4623 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum @@ -0,0 +1,17 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go new file mode 100644 index 0000000000..8e82921070 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod new file mode 100644 index 0000000000..b3f9b6a096 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod @@ -0,0 +1,8 @@ +module github.com/Azure/go-autorest/autorest/validation + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.0 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum new file mode 100644 index 0000000000..6b9010a736 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum @@ -0,0 +1,24 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git 
a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go new file mode 100644 index 0000000000..2b2668581e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package validation + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go index ae987f8fae..65899b69b8 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go @@ -398,11 +398,3 @@ func toInt64(v interface{}) (int64, bool) { } return 0, false } - -// NewErrorWithValidationError appends package type and method name in -// validation error. -// -// Deprecated: Please use validation.NewError() instead. -func NewErrorWithValidationError(err error, packageType, method string) error { - return NewError(packageType, method, err.Error()) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index 773fb96125..7a71089c9c 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -19,7 +19,7 @@ import ( "runtime" ) -const number = "v11.7.1" +const number = "v13.0.2" var ( userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 0000000000..f22ed56bcd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 0000000000..25c34c1085 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go index cd61cb18b6..0e7a6e9625 100644 --- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -16,175 +16,52 @@ package tracing import ( "context" - "fmt" "net/http" - "os" - - "contrib.go.opencensus.io/exporter/ocagent" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" ) +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + var ( - // Transport is the default tracing RoundTripper. The custom options setter will control - // if traces are being emitted or not. - Transport = &ochttp.Transport{ - Propagation: &tracecontext.HTTPFormat{}, - GetStartOptions: getStartOptions, - } - - // enabled is the flag for marking if tracing is enabled. - enabled = false - - // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise - // it will be using the parent sampler or the default. - sampler = trace.NeverSample() - - // Views for metric instrumentation. - views = map[string]*view.View{} - - // the trace exporter - traceExporter trace.Exporter + tracer Tracer ) -func init() { - enableFromEnv() +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t } -func enableFromEnv() { - _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED") - _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD") - if ok || legacyOk { - agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT") - - if ok { - EnableWithAIForwarding(agentEndpoint) - } else { - Enable() - } - } -} - -// IsEnabled returns true if monitoring is enabled for the sdk. +// IsEnabled returns true if a Tracer has been registered. func IsEnabled() bool { - return enabled + return tracer != nil } -// Enable will start instrumentation for metrics and traces. -func Enable() error { - enabled = true - sampler = nil - - err := initStats() - return err +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. 
+func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil } -// Disable will disable instrumentation for metrics and traces. -func Disable() { - disableStats() - sampler = trace.NeverSample() - if traceExporter != nil { - trace.UnregisterExporter(traceExporter) - } - enabled = false -} - -// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder -// exporter making the metrics and traces available in app insights. -func EnableWithAIForwarding(agentEndpoint string) (err error) { - err = Enable() - if err != nil { - return err - } - - traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint)) - if err != nil { - return err - } - trace.RegisterExporter(traceExporter) - return -} - -// getStartOptions is the custom options setter for the ochttp package. -func getStartOptions(*http.Request) trace.StartOptions { - return trace.StartOptions{ - Sampler: sampler, - } -} - -// initStats registers the views for the http metrics -func initStats() (err error) { - clientViews := []*view.View{ - ochttp.ClientCompletedCount, - ochttp.ClientRoundtripLatencyDistribution, - ochttp.ClientReceivedBytesDistribution, - ochttp.ClientSentBytesDistribution, - } - for _, cv := range clientViews { - vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name) - views[vn] = cv.WithName(vn) - err = view.Register(views[vn]) - if err != nil { - return err - } - } - return -} - -// disableStats will unregister the previously registered metrics -func disableStats() { - for _, v := range views { - view.Unregister(v) - } -} - -// StartSpan starts a trace span +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. func StartSpan(ctx context.Context, name string) context.Context { - ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler)) + if tracer != nil { + return tracer.StartSpan(ctx, name) + } return ctx } -// EndSpan ends a previously started span stored in the context +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. func EndSpan(ctx context.Context, httpStatusCode int, err error) { - span := trace.FromContext(ctx) - - if span == nil { - return - } - - if err != nil { - span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)}) - } - span.End() -} - -// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined -// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status -func toTraceStatusCode(httpStatusCode int) int32 { - switch { - case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest: - return trace.StatusCodeOK - case httpStatusCode == http.StatusBadRequest: - return trace.StatusCodeInvalidArgument - case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated. 
- return trace.StatusCodeUnauthenticated - case httpStatusCode == http.StatusForbidden: - return trace.StatusCodePermissionDenied - case httpStatusCode == http.StatusNotFound: - return trace.StatusCodeNotFound - case httpStatusCode == http.StatusTooManyRequests: - return trace.StatusCodeResourceExhausted - case httpStatusCode == 499: - return trace.StatusCodeCancelled - case httpStatusCode == http.StatusNotImplemented: - return trace.StatusCodeUnimplemented - case httpStatusCode == http.StatusServiceUnavailable: - return trace.StatusCodeUnavailable - case httpStatusCode == http.StatusGatewayTimeout: - return trace.StatusCodeDeadlineExceeded - default: - return trace.StatusCodeUnknown + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) } } diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS deleted file mode 100644 index e068e731ea..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. \ No newline at end of file diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go deleted file mode 100644 index 12b578d068..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go +++ /dev/null @@ -1,356 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/common/v1/common.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type LibraryInfo_Language int32 - -const ( - LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 - LibraryInfo_CPP LibraryInfo_Language = 1 - LibraryInfo_C_SHARP LibraryInfo_Language = 2 - LibraryInfo_ERLANG LibraryInfo_Language = 3 - LibraryInfo_GO_LANG LibraryInfo_Language = 4 - LibraryInfo_JAVA LibraryInfo_Language = 5 - LibraryInfo_NODE_JS LibraryInfo_Language = 6 - LibraryInfo_PHP LibraryInfo_Language = 7 - LibraryInfo_PYTHON LibraryInfo_Language = 8 - LibraryInfo_RUBY LibraryInfo_Language = 9 -) - -var LibraryInfo_Language_name = map[int32]string{ - 0: "LANGUAGE_UNSPECIFIED", - 1: "CPP", - 2: "C_SHARP", - 3: "ERLANG", - 4: "GO_LANG", - 5: "JAVA", - 6: "NODE_JS", - 7: "PHP", - 8: "PYTHON", - 9: "RUBY", -} - -var LibraryInfo_Language_value = map[string]int32{ - "LANGUAGE_UNSPECIFIED": 0, - "CPP": 1, - "C_SHARP": 2, - "ERLANG": 3, - "GO_LANG": 4, - "JAVA": 5, - "NODE_JS": 6, - "PHP": 7, - "PYTHON": 8, - "RUBY": 9, -} - -func (x LibraryInfo_Language) String() string { - return proto.EnumName(LibraryInfo_Language_name, int32(x)) -} - -func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{2, 0} -} - -// Identifier metadata of the Node that produces the span or tracing data. 
-// Note, this is not the metadata about the Node or service that is described by associated spans. -// In the future we plan to extend the identifier proto definition to support -// additional information (e.g cloud id, etc.) -type Node struct { - // Identifier that uniquely identifies a process within a VM/container. - Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` - // Information on the OpenCensus Library that initiates the stream. - LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` - // Additional information on service. - ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` - // Additional attributes. - Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{0} -} - -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetIdentifier() *ProcessIdentifier { - if m != nil { - return m.Identifier - } - return nil -} - -func (m *Node) GetLibraryInfo() *LibraryInfo { - if m != nil { - return m.LibraryInfo - } - return nil -} - -func (m *Node) GetServiceInfo() *ServiceInfo { - if m != nil { - return m.ServiceInfo - } - return nil -} - -func (m *Node) GetAttributes() map[string]string { - if m != nil { - return m.Attributes - } - return nil -} - -// Identifier that uniquely identifies a process within a VM/container. -type ProcessIdentifier struct { - // The host name. Usually refers to the machine/container name. - // For example: os.Hostname() in Go, socket.gethostname() in Python. - HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Process id. - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - // Start time of this ProcessIdentifier. Represented in epoch time. 
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } -func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } -func (*ProcessIdentifier) ProtoMessage() {} -func (*ProcessIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{1} -} - -func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) -} -func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) -} -func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessIdentifier.Merge(m, src) -} -func (m *ProcessIdentifier) XXX_Size() int { - return xxx_messageInfo_ProcessIdentifier.Size(m) -} -func (m *ProcessIdentifier) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) -} - -var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo - -func (m *ProcessIdentifier) GetHostName() string { - if m != nil { - return m.HostName - } - return "" -} - -func (m *ProcessIdentifier) GetPid() uint32 { - if m != nil { - return m.Pid - } - return 0 -} - -func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { - if m != nil { - return m.StartTimestamp - } - return nil -} - -// Information on OpenCensus Library. -type LibraryInfo struct { - // Language of OpenCensus Library. - Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` - // Version of Agent exporter of Library. - ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` - // Version of OpenCensus Library. 
- CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } -func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } -func (*LibraryInfo) ProtoMessage() {} -func (*LibraryInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{2} -} - -func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) -} -func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) -} -func (m *LibraryInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_LibraryInfo.Merge(m, src) -} -func (m *LibraryInfo) XXX_Size() int { - return xxx_messageInfo_LibraryInfo.Size(m) -} -func (m *LibraryInfo) XXX_DiscardUnknown() { - xxx_messageInfo_LibraryInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo - -func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { - if m != nil { - return m.Language - } - return LibraryInfo_LANGUAGE_UNSPECIFIED -} - -func (m *LibraryInfo) GetExporterVersion() string { - if m != nil { - return m.ExporterVersion - } - return "" -} - -func (m *LibraryInfo) GetCoreLibraryVersion() string { - if m != nil { - return m.CoreLibraryVersion - } - return "" -} - -// Additional service information. -type ServiceInfo struct { - // Name of the service. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } -func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } -func (*ServiceInfo) ProtoMessage() {} -func (*ServiceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{3} -} - -func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) -} -func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) -} -func (m *ServiceInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceInfo.Merge(m, src) -} -func (m *ServiceInfo) XXX_Size() int { - return xxx_messageInfo_ServiceInfo.Size(m) -} -func (m *ServiceInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo - -func (m *ServiceInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func init() { - proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) - proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") - proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") - proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") - proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) -} - 
-var fileDescriptor_126c72ed8a252c84 = []byte{ - // 590 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e, - 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee, - 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01, - 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde, - 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9, - 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2, - 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89, - 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9, - 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7, - 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39, - 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b, - 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b, - 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13, - 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06, - 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d, - 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67, - 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2, - 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a, - 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a, - 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76, - 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23, - 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c, - 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c, - 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92, - 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e, - 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51, - 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14, - 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83, - 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0, - 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86, - 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4, - 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd, - 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9, - 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c, - 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70, - 0x85, 0xec, 0xd8, 0x9f, 
0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1, - 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go deleted file mode 100644 index 801212d925..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go +++ /dev/null @@ -1,264 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/metrics/v1/metrics_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" - v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type ExportMetricsServiceRequest struct { - // This is required only in the first message on the stream or if the - // previous sent ExportMetricsServiceRequest message has a different Node (e.g. - // when the same RPC is used to send Metrics from multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // A list of metrics that belong to the last received Node. - Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` - // The resource for the metrics in this message that do not have an explicit - // resource set. - // If unset, the most recently set resource in the RPC stream applies. It is - // valid to never be set within a stream, e.g. when no resource info is known - // at all or when all sent metrics have an explicit resource set. 
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } -func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceRequest) ProtoMessage() {} -func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_47e253a956287d04, []int{0} -} - -func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) -} -func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) -} -func (m *ExportMetricsServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) -} -func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo - -func (m *ExportMetricsServiceRequest) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric { - if m != nil { - return m.Metrics - } - return nil -} - -func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource { - if m != nil { - return m.Resource - } - return nil -} - -type ExportMetricsServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } -func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceResponse) ProtoMessage() {} -func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_47e253a956287d04, []int{1} -} - -func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) -} -func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) -} -func (m *ExportMetricsServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) -} -func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest") - proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04) -} - -var fileDescriptor_47e253a956287d04 = []byte{ - // 340 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40, - 0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45, - 0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f, - 0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24, - 0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26, - 0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5, - 0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09, - 0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54, - 0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1, - 0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4, - 0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7, - 0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c, - 0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58, - 0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6, - 0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b, - 0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb, - 0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90, - 0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a, - 0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab, - 0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa, - 0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3, - 0x1b, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricsServiceClient is the client API for MetricsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) -} - -type metricsServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) 
- if err != nil { - return nil, err - } - x := &metricsServiceExportClient{stream} - return x, nil -} - -type MetricsService_ExportClient interface { - Send(*ExportMetricsServiceRequest) error - Recv() (*ExportMetricsServiceResponse, error) - grpc.ClientStream -} - -type metricsServiceExportClient struct { - grpc.ClientStream -} - -func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { - m := new(ExportMetricsServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// MetricsServiceServer is the server API for MetricsService service. -type MetricsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(MetricsService_ExportServer) error -} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) -} - -type MetricsService_ExportServer interface { - Send(*ExportMetricsServiceResponse) error - Recv() (*ExportMetricsServiceRequest, error) - grpc.ServerStream -} - -type metricsServiceExportServer struct { - grpc.ServerStream -} - -func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { - m := new(ExportMetricsServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Export", - Handler: _MetricsService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go deleted file mode 100644 index e7c49a387a..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/trace/v1/trace_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type CurrentLibraryConfig struct { - // This is required only in the first message on the stream or if the - // previous sent CurrentLibraryConfig message has a different Node (e.g. - // when the same RPC is used to configure multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // Current configuration. - Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} } -func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) } -func (*CurrentLibraryConfig) ProtoMessage() {} -func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{0} -} - -func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b) -} -func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic) -} -func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_CurrentLibraryConfig.Merge(m, src) -} -func (m *CurrentLibraryConfig) XXX_Size() int { - return xxx_messageInfo_CurrentLibraryConfig.Size(m) -} -func (m *CurrentLibraryConfig) XXX_DiscardUnknown() { - xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo - -func (m *CurrentLibraryConfig) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig { - if m != nil { - return m.Config - } - return nil -} - -type UpdatedLibraryConfig struct { - // This field is ignored when the RPC is used to configure only one Application. - // This is required only in the first message on the stream or if the - // previous sent UpdatedLibraryConfig message has a different Node. - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // Requested updated configuration. 
- Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} } -func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) } -func (*UpdatedLibraryConfig) ProtoMessage() {} -func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{1} -} - -func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b) -} -func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic) -} -func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src) -} -func (m *UpdatedLibraryConfig) XXX_Size() int { - return xxx_messageInfo_UpdatedLibraryConfig.Size(m) -} -func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() { - xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo - -func (m *UpdatedLibraryConfig) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig { - if m != nil { - return m.Config - } - return nil -} - -type ExportTraceServiceRequest struct { - // This is required only in the first message on the stream or if the - // previous sent ExportTraceServiceRequest message has a different Node (e.g. - // when the same RPC is used to send Spans from multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // A list of Spans that belong to the last received Node. - Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` - // The resource for the spans in this message that do not have an explicit - // resource set. - // If unset, the most recently set resource in the RPC stream applies. It is - // valid to never be set within a stream, e.g. when no resource info is known. 
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } -func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceRequest) ProtoMessage() {} -func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{2} -} - -func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) -} -func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) -} -func (m *ExportTraceServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceRequest.Size(m) -} -func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo - -func (m *ExportTraceServiceRequest) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { - if m != nil { - return m.Spans - } - return nil -} - -func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { - if m != nil { - return m.Resource - } - return nil -} - -type ExportTraceServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } -func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceResponse) ProtoMessage() {} -func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{3} -} - -func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) -} -func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) -} -func (m *ExportTraceServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceResponse.Size(m) -} -func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") - proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") - proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") - proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", 
fileDescriptor_7027f99caf7ac6a5) -} - -var fileDescriptor_7027f99caf7ac6a5 = []byte{ - // 423 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40, - 0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a, - 0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8, - 0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3, - 0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d, - 0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49, - 0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91, - 0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d, - 0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7, - 0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf, - 0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73, - 0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1, - 0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31, - 0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95, - 0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d, - 0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90, - 0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d, - 0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d, - 0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89, - 0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44, - 0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e, - 0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae, - 0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59, - 0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94, - 0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00, - 0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf, - 0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TraceServiceClient interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. 
- Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) -} - -type traceServiceClient struct { - cc *grpc.ClientConn -} - -func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) - if err != nil { - return nil, err - } - x := &traceServiceConfigClient{stream} - return x, nil -} - -type TraceService_ConfigClient interface { - Send(*CurrentLibraryConfig) error - Recv() (*UpdatedLibraryConfig, error) - grpc.ClientStream -} - -type traceServiceConfigClient struct { - grpc.ClientStream -} - -func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { - m := new(UpdatedLibraryConfig) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) - if err != nil { - return nil, err - } - x := &traceServiceExportClient{stream} - return x, nil -} - -type TraceService_ExportClient interface { - Send(*ExportTraceServiceRequest) error - Recv() (*ExportTraceServiceResponse, error) - grpc.ClientStream -} - -type traceServiceExportClient struct { - grpc.ClientStream -} - -func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { - m := new(ExportTraceServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TraceServiceServer is the server API for TraceService service. -type TraceServiceServer interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. - Config(TraceService_ConfigServer) error - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(TraceService_ExportServer) error -} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) -} - -type TraceService_ConfigServer interface { - Send(*UpdatedLibraryConfig) error - Recv() (*CurrentLibraryConfig, error) - grpc.ServerStream -} - -type traceServiceConfigServer struct { - grpc.ServerStream -} - -func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { - m := new(CurrentLibraryConfig) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) -} - -type TraceService_ExportServer interface { - Send(*ExportTraceServiceResponse) error - Recv() (*ExportTraceServiceRequest, error) - grpc.ServerStream -} - -type traceServiceExportServer struct { - grpc.ServerStream -} - -func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { - m := new(ExportTraceServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.trace.v1.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Config", - Handler: _TraceService_Config_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "Export", - Handler: _TraceService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go deleted file mode 100644 index bd4b8a8278..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opencensus/proto/agent/trace/v1/trace_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1 - -import ( - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { - var metadata runtime.ServerMetadata - stream, err := client.Export(ctx) - if err != nil { - grpclog.Infof("Failed to start streaming: %v", err) - return nil, metadata, err - } - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, berr - } - dec := marshaler.NewDecoder(newReader()) - handleSend := func() error { - var protoReq ExportTraceServiceRequest - err := dec.Decode(&protoReq) - if err == io.EOF { - return err - } - if err != nil { - grpclog.Infof("Failed to decode request: %v", err) - return err - } - if err := stream.Send(&protoReq); err != nil { - grpclog.Infof("Failed to send request: %v", err) - return err - } - return nil - } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } - go func() { - for { - if err := handleSend(); err != nil { - break - } - } - if err := stream.CloseSend(); err != nil { - grpclog.Infof("Failed to terminate client stream: %v", err) - } - }() - header, err := stream.Header() - if err != nil { - grpclog.Infof("Failed to get header from client: %v", err) - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil -} - -// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterTraceServiceHandler(ctx, mux, conn) -} - -// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) -} - -// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "TraceServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "TraceServiceClient" to call the correct interceptors. -func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { - - mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) -) - -var ( - forward_TraceService_Export_0 = runtime.ForwardResponseStream -) diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go deleted file mode 100644 index 53b8aa99e1..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go +++ /dev/null @@ -1,1126 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/metrics/v1/metrics.proto - -package v1 - -import ( - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// The kind of metric. It describes how the data is reported. -// -// A gauge is an instantaneous measurement of a value. -// -// A cumulative measurement is a value accumulated over a time interval. In -// a time series, cumulative measurements should have the same start time, -// increasing values and increasing end times, until an event resets the -// cumulative value to zero and sets a new start time for the following -// points. -type MetricDescriptor_Type int32 - -const ( - // Do not use this default value. - MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 - // Integer gauge. The value can go both up and down. - MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 - // Floating point gauge. The value can go both up and down. - MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 - // Distribution gauge measurement. The count and sum can go both up and - // down. 
Recorded values are always >= 0. - // Used in scenarios like a snapshot of time the current items in a queue - // have spent there. - MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3 - // Integer cumulative measurement. The value cannot decrease, if resets - // then the start_time should also be reset. - MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4 - // Floating point cumulative measurement. The value cannot decrease, if - // resets then the start_time should also be reset. Recorded values are - // always >= 0. - MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5 - // Distribution cumulative measurement. The count and sum cannot decrease, - // if resets then the start_time should also be reset. - MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6 - // Some frameworks implemented Histograms as a summary of observations - // (usually things like request durations and response sizes). While it - // also provides a total count of observations and a sum of all observed - // values, it calculates configurable percentiles over a sliding time - // window. This is not recommended, since it cannot be aggregated. - MetricDescriptor_SUMMARY MetricDescriptor_Type = 7 -) - -var MetricDescriptor_Type_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "GAUGE_INT64", - 2: "GAUGE_DOUBLE", - 3: "GAUGE_DISTRIBUTION", - 4: "CUMULATIVE_INT64", - 5: "CUMULATIVE_DOUBLE", - 6: "CUMULATIVE_DISTRIBUTION", - 7: "SUMMARY", -} - -var MetricDescriptor_Type_value = map[string]int32{ - "UNSPECIFIED": 0, - "GAUGE_INT64": 1, - "GAUGE_DOUBLE": 2, - "GAUGE_DISTRIBUTION": 3, - "CUMULATIVE_INT64": 4, - "CUMULATIVE_DOUBLE": 5, - "CUMULATIVE_DISTRIBUTION": 6, - "SUMMARY": 7, -} - -func (x MetricDescriptor_Type) String() string { - return proto.EnumName(MetricDescriptor_Type_name, int32(x)) -} - -func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{1, 0} -} - -// Defines a Metric which has one or more timeseries. -type Metric struct { - // The descriptor of the Metric. - // TODO(issue #152): consider only sending the name of descriptor for - // optimization. - MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` - // One or more timeseries for a single metric, where each timeseries has - // one or more points. - Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"` - // The resource for the metric. If unset, it may be set to a default value - // provided for a sequence of messages in an RPC stream. 
- Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{0} -} - -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetMetricDescriptor() *MetricDescriptor { - if m != nil { - return m.MetricDescriptor - } - return nil -} - -func (m *Metric) GetTimeseries() []*TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -func (m *Metric) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -// Defines a metric type and its schema. -type MetricDescriptor struct { - // The metric type, including its DNS name prefix. It must be unique. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // A detailed description of the metric, which can be used in documentation. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // The unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` - Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` - // The label keys associated with the metric descriptor. 
- LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } -func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } -func (*MetricDescriptor) ProtoMessage() {} -func (*MetricDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{1} -} - -func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) -} -func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) -} -func (m *MetricDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricDescriptor.Merge(m, src) -} -func (m *MetricDescriptor) XXX_Size() int { - return xxx_messageInfo_MetricDescriptor.Size(m) -} -func (m *MetricDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo - -func (m *MetricDescriptor) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *MetricDescriptor) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *MetricDescriptor) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *MetricDescriptor) GetType() MetricDescriptor_Type { - if m != nil { - return m.Type - } - return MetricDescriptor_UNSPECIFIED -} - -func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { - if m != nil { - return m.LabelKeys - } - return nil -} - -// Defines a label key associated with a metric descriptor. -type LabelKey struct { - // The key for the label. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // A human-readable description of what this label key represents. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelKey) Reset() { *m = LabelKey{} } -func (m *LabelKey) String() string { return proto.CompactTextString(m) } -func (*LabelKey) ProtoMessage() {} -func (*LabelKey) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{2} -} - -func (m *LabelKey) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelKey.Unmarshal(m, b) -} -func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) -} -func (m *LabelKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelKey.Merge(m, src) -} -func (m *LabelKey) XXX_Size() int { - return xxx_messageInfo_LabelKey.Size(m) -} -func (m *LabelKey) XXX_DiscardUnknown() { - xxx_messageInfo_LabelKey.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelKey proto.InternalMessageInfo - -func (m *LabelKey) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *LabelKey) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -// A collection of data points that describes the time-varying values -// of a metric. -type TimeSeries struct { - // Must be present for cumulative metrics. The time when the cumulative value - // was reset to zero. Exclusive. 
The cumulative value is over the time interval - // (start_timestamp, timestamp]. If not specified, the backend can use the - // previous recorded value. - StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` - // The set of label values that uniquely identify this timeseries. Applies to - // all points. The order of label values must match that of label keys in the - // metric descriptor. - LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` - // The data points of this timeseries. Point.value type MUST match the - // MetricDescriptor.type. - Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (m *TimeSeries) String() string { return proto.CompactTextString(m) } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{3} -} - -func (m *TimeSeries) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TimeSeries.Unmarshal(m, b) -} -func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) -} -func (m *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(m, src) -} -func (m *TimeSeries) XXX_Size() int { - return xxx_messageInfo_TimeSeries.Size(m) -} -func (m *TimeSeries) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeries proto.InternalMessageInfo - -func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { - if m != nil { - return m.StartTimestamp - } - return nil -} - -func (m *TimeSeries) GetLabelValues() []*LabelValue { - if m != nil { - return m.LabelValues - } - return nil -} - -func (m *TimeSeries) GetPoints() []*Point { - if m != nil { - return m.Points - } - return nil -} - -type LabelValue struct { - // The value for the label. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - // If false the value field is ignored and considered not set. - // This is used to differentiate a missing label from an empty string. 
- HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelValue) Reset() { *m = LabelValue{} } -func (m *LabelValue) String() string { return proto.CompactTextString(m) } -func (*LabelValue) ProtoMessage() {} -func (*LabelValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{4} -} - -func (m *LabelValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelValue.Unmarshal(m, b) -} -func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) -} -func (m *LabelValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValue.Merge(m, src) -} -func (m *LabelValue) XXX_Size() int { - return xxx_messageInfo_LabelValue.Size(m) -} -func (m *LabelValue) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValue.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValue proto.InternalMessageInfo - -func (m *LabelValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *LabelValue) GetHasValue() bool { - if m != nil { - return m.HasValue - } - return false -} - -// A timestamped measurement. -type Point struct { - // The moment when this point was recorded. Inclusive. - // If not specified, the timestamp will be decided by the backend. - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The actual point value. - // - // Types that are valid to be assigned to Value: - // *Point_Int64Value - // *Point_DoubleValue - // *Point_DistributionValue - // *Point_SummaryValue - Value isPoint_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Point) Reset() { *m = Point{} } -func (m *Point) String() string { return proto.CompactTextString(m) } -func (*Point) ProtoMessage() {} -func (*Point) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{5} -} - -func (m *Point) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Point.Unmarshal(m, b) -} -func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Point.Marshal(b, m, deterministic) -} -func (m *Point) XXX_Merge(src proto.Message) { - xxx_messageInfo_Point.Merge(m, src) -} -func (m *Point) XXX_Size() int { - return xxx_messageInfo_Point.Size(m) -} -func (m *Point) XXX_DiscardUnknown() { - xxx_messageInfo_Point.DiscardUnknown(m) -} - -var xxx_messageInfo_Point proto.InternalMessageInfo - -func (m *Point) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type isPoint_Value interface { - isPoint_Value() -} - -type Point_Int64Value struct { - Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type Point_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type Point_DistributionValue struct { - DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` -} - -type Point_SummaryValue struct { - SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` -} - -func (*Point_Int64Value) isPoint_Value() {} - -func (*Point_DoubleValue) 
isPoint_Value() {} - -func (*Point_DistributionValue) isPoint_Value() {} - -func (*Point_SummaryValue) isPoint_Value() {} - -func (m *Point) GetValue() isPoint_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Point) GetInt64Value() int64 { - if x, ok := m.GetValue().(*Point_Int64Value); ok { - return x.Int64Value - } - return 0 -} - -func (m *Point) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*Point_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *Point) GetDistributionValue() *DistributionValue { - if x, ok := m.GetValue().(*Point_DistributionValue); ok { - return x.DistributionValue - } - return nil -} - -func (m *Point) GetSummaryValue() *SummaryValue { - if x, ok := m.GetValue().(*Point_SummaryValue); ok { - return x.SummaryValue - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Point) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Point_Int64Value)(nil), - (*Point_DoubleValue)(nil), - (*Point_DistributionValue)(nil), - (*Point_SummaryValue)(nil), - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type DistributionValue struct { - // The number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // The sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` - // The sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` - // Don't change bucket boundaries within a TimeSeries if your backend doesn't - // support this. - // TODO(issue #152): consider not required to send bucket options for - // optimization. - BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` - // If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. 
- Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue) Reset() { *m = DistributionValue{} } -func (m *DistributionValue) String() string { return proto.CompactTextString(m) } -func (*DistributionValue) ProtoMessage() {} -func (*DistributionValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6} -} - -func (m *DistributionValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue.Unmarshal(m, b) -} -func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) -} -func (m *DistributionValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue.Merge(m, src) -} -func (m *DistributionValue) XXX_Size() int { - return xxx_messageInfo_DistributionValue.Size(m) -} -func (m *DistributionValue) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue proto.InternalMessageInfo - -func (m *DistributionValue) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DistributionValue) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { - if m != nil { - return m.SumOfSquaredDeviation - } - return 0 -} - -func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { - if m != nil { - return m.BucketOptions - } - return nil -} - -func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { - if m != nil { - return m.Buckets - } - return nil -} - -// A Distribution may optionally contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described by -// BucketOptions. -// -// If bucket_options has no type, then there is no histogram associated with -// the Distribution. 
-type DistributionValue_BucketOptions struct { - // Types that are valid to be assigned to Type: - // *DistributionValue_BucketOptions_Explicit_ - Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} } -func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_BucketOptions) ProtoMessage() {} -func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 0} -} - -func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b) -} -func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic) -} -func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src) -} -func (m *DistributionValue_BucketOptions) XXX_Size() int { - return xxx_messageInfo_DistributionValue_BucketOptions.Size(m) -} -func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo - -type isDistributionValue_BucketOptions_Type interface { - isDistributionValue_BucketOptions_Type() -} - -type DistributionValue_BucketOptions_Explicit_ struct { - Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"` -} - -func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {} - -func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit { - if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok { - return x.Explicit - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*DistributionValue_BucketOptions_Explicit_)(nil), - } -} - -// Specifies a set of buckets with arbitrary upper-bounds. -// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket -// index i are: -// -// [0, bucket_bounds[i]) for i == 0 -// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 -// [bucket_bounds[i], +infinity) for i == N-1 -type DistributionValue_BucketOptions_Explicit struct { - // The values must be strictly increasing and > 0. 
- Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_BucketOptions_Explicit) Reset() { - *m = DistributionValue_BucketOptions_Explicit{} -} -func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} -func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} -} - -func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo - -func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { - if m != nil { - return m.Bounds - } - return nil -} - -type DistributionValue_Bucket struct { - // The number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // If the distribution does not have a histogram, then omit this field. 
- Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } -func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_Bucket) ProtoMessage() {} -func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 1} -} - -func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) -} -func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) -} -func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) -} -func (m *DistributionValue_Bucket) XXX_Size() int { - return xxx_messageInfo_DistributionValue_Bucket.Size(m) -} -func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo - -func (m *DistributionValue_Bucket) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -// Exemplars are example points that may be used to annotate aggregated -// Distribution values. They are metadata that gives information about a -// particular value added to a Distribution bucket. -type DistributionValue_Exemplar struct { - // Value of the exemplar point. It determines which bucket the exemplar - // belongs to. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // The observation (sampling) time of the above value. - Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Contextual information about the example value. 
- Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } -func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_Exemplar) ProtoMessage() {} -func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 2} -} - -func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) -} -func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) -} -func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) -} -func (m *DistributionValue_Exemplar) XXX_Size() int { - return xxx_messageInfo_DistributionValue_Exemplar.Size(m) -} -func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo - -func (m *DistributionValue_Exemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { - if m != nil { - return m.Attachments - } - return nil -} - -// The start_timestamp only applies to the count and sum in the SummaryValue. -type SummaryValue struct { - // The total number of recorded values since start_time. Optional since - // some systems don't expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` - // The total sum of recorded values since start_time. Optional since some - // systems don't expose this. If count is zero then this field must be zero. - // This field must be unset if the sum is not available. - Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` - // Values calculated over an arbitrary time window. 
- Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue) Reset() { *m = SummaryValue{} } -func (m *SummaryValue) String() string { return proto.CompactTextString(m) } -func (*SummaryValue) ProtoMessage() {} -func (*SummaryValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7} -} - -func (m *SummaryValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue.Unmarshal(m, b) -} -func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) -} -func (m *SummaryValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue.Merge(m, src) -} -func (m *SummaryValue) XXX_Size() int { - return xxx_messageInfo_SummaryValue.Size(m) -} -func (m *SummaryValue) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue proto.InternalMessageInfo - -func (m *SummaryValue) GetCount() *wrappers.Int64Value { - if m != nil { - return m.Count - } - return nil -} - -func (m *SummaryValue) GetSum() *wrappers.DoubleValue { - if m != nil { - return m.Sum - } - return nil -} - -func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -// The values in this message can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type SummaryValue_Snapshot struct { - // The number of values in the snapshot. Optional since some systems don't - // expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` - // The sum of values in the snapshot. Optional since some systems don't - // expose this. If count is zero then this field must be zero or not set - // (if not supported). - Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` - // A list of values at different percentiles of the distribution calculated - // from the current snapshot. The percentiles must be strictly increasing. 
- PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } -func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } -func (*SummaryValue_Snapshot) ProtoMessage() {} -func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7, 0} -} - -func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) -} -func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) -} -func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) -} -func (m *SummaryValue_Snapshot) XXX_Size() int { - return xxx_messageInfo_SummaryValue_Snapshot.Size(m) -} -func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo - -func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { - if m != nil { - return m.Count - } - return nil -} - -func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { - if m != nil { - return m.Sum - } - return nil -} - -func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { - if m != nil { - return m.PercentileValues - } - return nil -} - -// Represents the value at a given percentile of a distribution. -type SummaryValue_Snapshot_ValueAtPercentile struct { - // The percentile of a distribution. Must be in the interval - // (0.0, 100.0]. - Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` - // The value at the given percentile of a distribution. 
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { - *m = SummaryValue_Snapshot_ValueAtPercentile{} -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } -func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} -func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo - -func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { - if m != nil { - return m.Percentile - } - return 0 -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) - proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") - proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") - proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") - proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") - proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") - proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") - proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") - proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") - proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") - proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") - proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") - proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") - proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") - proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") -} - -func init() { - 
proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) -} - -var fileDescriptor_0ee3deb72053811a = []byte{ - // 1098 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, - 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xdb, 0xf5, 0xa8, 0xed, 0xdf, 0xda, 0xfc, 0x15, 0xc2, - 0x22, 0x20, 0x15, 0xca, 0x5a, 0x31, 0xa5, 0xad, 0x2a, 0x54, 0x14, 0xc7, 0x6e, 0x62, 0xc8, 0x87, - 0x35, 0xb6, 0x2b, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x35, 0xf8, - 0x05, 0x78, 0x04, 0xae, 0xb9, 0x45, 0x3c, 0x07, 0x57, 0x3c, 0x01, 0x4f, 0x81, 0x78, 0x03, 0xb4, - 0x33, 0xb3, 0x1f, 0x89, 0xc1, 0xd4, 0x45, 0xe2, 0xee, 0x9c, 0x33, 0xe7, 0xfc, 0xfc, 0x3b, 0x9f, - 0x5e, 0x78, 0xe4, 0x07, 0xc4, 0xb3, 0x89, 0x47, 0x23, 0xda, 0x08, 0x42, 0x9f, 0xf9, 0x0d, 0x97, - 0xb0, 0xd0, 0xb1, 0x69, 0x63, 0xb6, 0x9f, 0x88, 0x26, 0x7f, 0x40, 0x5b, 0x99, 0xab, 0xb0, 0x98, - 0xc9, 0xfb, 0x6c, 0x5f, 0x7f, 0xef, 0xd2, 0xf7, 0x2f, 0xa7, 0x44, 0x60, 0x8c, 0xa3, 0x8b, 0x06, - 0x73, 0x5c, 0x42, 0x99, 0xe5, 0x06, 0xc2, 0x57, 0xdf, 0xbe, 0xed, 0xf0, 0x6d, 0x68, 0x05, 0x01, - 0x09, 0x25, 0x96, 0xfe, 0xc9, 0x02, 0x91, 0x90, 0x50, 0x3f, 0x0a, 0x6d, 0x12, 0x33, 0x49, 0x64, - 0xe1, 0x6c, 0xfc, 0xa1, 0x40, 0xf9, 0x94, 0xff, 0x38, 0x7a, 0x0d, 0x35, 0x41, 0x63, 0x34, 0x21, - 0xd4, 0x0e, 0x9d, 0x80, 0xf9, 0x61, 0x5d, 0xd9, 0x51, 0x76, 0xd5, 0xe6, 0x9e, 0xb9, 0x84, 0xb1, - 0x29, 0xe2, 0xdb, 0x69, 0x10, 0xd6, 0xdc, 0x5b, 0x16, 0x74, 0x04, 0xc0, 0xd3, 0x20, 0xa1, 0x43, - 0x68, 0xbd, 0xb0, 0x53, 0xdc, 0x55, 0x9b, 0x1f, 0x2f, 0x05, 0x1d, 0x38, 0x2e, 0xe9, 0x73, 0x77, - 0x9c, 0x0b, 0x45, 0x2d, 0xa8, 0x24, 0x19, 0xd4, 0x8b, 0x9c, 0xdb, 0x47, 0x8b, 0x30, 0x69, 0x8e, - 0xb3, 0x7d, 0x13, 0x4b, 0x19, 0xa7, 0x71, 0xc6, 0x0f, 0x45, 0xd0, 0x6e, 0x73, 0x46, 0x08, 0x4a, - 0x9e, 0xe5, 0x12, 0x9e, 0xf0, 0x26, 0xe6, 0x32, 0xda, 0x01, 0x35, 0x29, 0x85, 0xe3, 0x7b, 0xf5, - 0x02, 0x7f, 0xca, 0x9b, 0xe2, 0xa8, 0xc8, 0x73, 0x18, 0xa7, 0xb2, 0x89, 0xb9, 0x8c, 0x5e, 0x42, - 0x89, 0xcd, 0x03, 0x52, 0x2f, 0xed, 0x28, 0xbb, 0x77, 0x9b, 0xcd, 0x95, 0x4a, 0x67, 0x0e, 0xe6, - 0x01, 0xc1, 0x3c, 0x1e, 0xb5, 0x01, 0xa6, 0xd6, 0x98, 0x4c, 0x47, 0xd7, 0x64, 0x4e, 0xeb, 0xeb, - 0xbc, 0x66, 0x1f, 0x2e, 0x45, 0x3b, 0x89, 0xdd, 0xbf, 0x22, 0x73, 0xbc, 0x39, 0x95, 0x12, 0x35, - 0x7e, 0x52, 0xa0, 0x14, 0x83, 0xa2, 0x7b, 0xa0, 0x0e, 0xcf, 0xfa, 0xbd, 0xce, 0x61, 0xf7, 0x65, - 0xb7, 0xd3, 0xd6, 0xd6, 0x62, 0xc3, 0xd1, 0xc1, 0xf0, 0xa8, 0x33, 0xea, 0x9e, 0x0d, 0x9e, 0x3c, - 0xd6, 0x14, 0xa4, 0x41, 0x55, 0x18, 0xda, 0xe7, 0xc3, 0xd6, 0x49, 0x47, 0x2b, 0xa0, 0x87, 0x80, - 0xa4, 0xa5, 0xdb, 0x1f, 0xe0, 0x6e, 0x6b, 0x38, 0xe8, 0x9e, 0x9f, 0x69, 0x45, 0x74, 0x1f, 0xb4, - 0xc3, 0xe1, 0xe9, 0xf0, 0xe4, 0x60, 0xd0, 0x7d, 0x95, 0xc4, 0x97, 0xd0, 0x03, 0xa8, 0xe5, 0xac, - 0x12, 0x64, 0x1d, 0x6d, 0xc1, 0xff, 0xf2, 0xe6, 0x3c, 0x52, 0x19, 0xa9, 0xb0, 0xd1, 0x1f, 0x9e, - 0x9e, 0x1e, 0xe0, 0xaf, 0xb5, 0x0d, 0xe3, 0x05, 0x54, 0x92, 0x14, 0x90, 0x06, 0xc5, 0x6b, 0x32, - 0x97, 0xed, 0x88, 0xc5, 0x7f, 0xee, 0x86, 0xf1, 0x9b, 0x02, 0x90, 0xcd, 0x0d, 0x3a, 0x84, 0x7b, - 0x94, 0x59, 0x21, 0x1b, 0xa5, 0x1b, 0x24, 0xc7, 0x59, 0x37, 0xc5, 0x0a, 0x99, 0xc9, 0x0a, 0xf1, - 0x69, 0xe3, 0x1e, 0xf8, 0x2e, 0x0f, 0x49, 0x75, 0xf4, 0x25, 0x54, 0x45, 0x17, 0x66, 0xd6, 0x34, - 0x7a, 0xcb, 0xd9, 0xe5, 0x49, 0xbc, 0x8a, 0xfd, 0xb1, 0x3a, 0x4d, 0x65, 0x8a, 0x9e, 0x43, 0x39, - 0xf0, 0x1d, 0x8f, 0xd1, 0x7a, 0x91, 0xa3, 0x18, 0x4b, 0x51, 0x7a, 0xb1, 0x2b, 0x96, 0x11, 0xc6, - 0x17, 0x00, 0x19, 
0x2c, 0xba, 0x0f, 0xeb, 0x9c, 0x8f, 0xac, 0x8f, 0x50, 0xd0, 0x16, 0x6c, 0x5e, - 0x59, 0x54, 0x30, 0xe5, 0xf5, 0xa9, 0xe0, 0xca, 0x95, 0x45, 0x79, 0x88, 0xf1, 0x4b, 0x01, 0xd6, - 0x39, 0x24, 0x7a, 0x06, 0x9b, 0xab, 0x54, 0x24, 0x73, 0x46, 0xef, 0x83, 0xea, 0x78, 0xec, 0xc9, - 0xe3, 0xdc, 0x4f, 0x14, 0x8f, 0xd7, 0x30, 0x70, 0xa3, 0x60, 0xf6, 0x01, 0x54, 0x27, 0x7e, 0x34, - 0x9e, 0x12, 0xe9, 0x13, 0x6f, 0x86, 0x72, 0xbc, 0x86, 0x55, 0x61, 0x15, 0x4e, 0x23, 0x40, 0x13, - 0x87, 0xb2, 0xd0, 0x19, 0x47, 0x71, 0xe3, 0xa4, 0x6b, 0x89, 0x53, 0x31, 0x97, 0x16, 0xa5, 0x9d, - 0x0b, 0xe3, 0x58, 0xc7, 0x6b, 0xb8, 0x36, 0xb9, 0x6d, 0x44, 0x3d, 0xb8, 0x43, 0x23, 0xd7, 0xb5, - 0xc2, 0xb9, 0xc4, 0x5e, 0xe7, 0xd8, 0x8f, 0x96, 0x62, 0xf7, 0x45, 0x44, 0x02, 0x5b, 0xa5, 0x39, - 0xbd, 0xb5, 0x21, 0x2b, 0x6e, 0xfc, 0x5a, 0x86, 0xda, 0x02, 0x8b, 0xb8, 0x21, 0xb6, 0x1f, 0x79, - 0x8c, 0xd7, 0xb3, 0x88, 0x85, 0x12, 0x0f, 0x31, 0x8d, 0x5c, 0x5e, 0x27, 0x05, 0xc7, 0x22, 0x7a, - 0x0a, 0x75, 0x1a, 0xb9, 0x23, 0xff, 0x62, 0x44, 0xdf, 0x44, 0x56, 0x48, 0x26, 0xa3, 0x09, 0x99, - 0x39, 0x16, 0x9f, 0x68, 0x5e, 0x2a, 0xfc, 0x80, 0x46, 0xee, 0xf9, 0x45, 0x5f, 0xbc, 0xb6, 0x93, - 0x47, 0x64, 0xc3, 0xdd, 0x71, 0x64, 0x5f, 0x13, 0x36, 0xf2, 0xf9, 0xb0, 0x53, 0x59, 0xae, 0xcf, - 0x57, 0x2b, 0x97, 0xd9, 0xe2, 0x20, 0xe7, 0x02, 0x03, 0xdf, 0x19, 0xe7, 0x55, 0x74, 0x0e, 0x1b, - 0xc2, 0x90, 0xdc, 0x9b, 0xcf, 0xde, 0x09, 0x1d, 0x27, 0x28, 0xfa, 0x8f, 0x0a, 0xdc, 0xb9, 0xf1, - 0x8b, 0xc8, 0x86, 0x0a, 0xf9, 0x2e, 0x98, 0x3a, 0xb6, 0xc3, 0xe4, 0xec, 0x75, 0xfe, 0x4d, 0x06, - 0x66, 0x47, 0x82, 0x1d, 0xaf, 0xe1, 0x14, 0x58, 0x37, 0xa0, 0x92, 0xd8, 0xd1, 0x43, 0x28, 0x8f, - 0xfd, 0xc8, 0x9b, 0xd0, 0xba, 0xb2, 0x53, 0xdc, 0x55, 0xb0, 0xd4, 0x5a, 0x65, 0x71, 0xa6, 0x75, - 0x0a, 0x65, 0x81, 0xf8, 0x37, 0x3d, 0xec, 0xc7, 0x84, 0x89, 0x1b, 0x4c, 0xad, 0x90, 0x37, 0x52, - 0x6d, 0x3e, 0x5d, 0x91, 0x70, 0x47, 0x86, 0xe3, 0x14, 0x48, 0xff, 0xbe, 0x10, 0x33, 0x14, 0xca, - 0xcd, 0x65, 0x56, 0x92, 0x65, 0xbe, 0xb1, 0xa5, 0x85, 0x55, 0xb6, 0xf4, 0x1b, 0x50, 0x2d, 0xc6, - 0x2c, 0xfb, 0xca, 0x25, 0xd9, 0xad, 0x39, 0x7e, 0x47, 0xd2, 0xe6, 0x41, 0x06, 0xd5, 0xf1, 0x58, - 0x38, 0xc7, 0x79, 0x70, 0xfd, 0x05, 0x68, 0xb7, 0x1d, 0xfe, 0xe2, 0x74, 0xa7, 0x19, 0x16, 0x72, - 0xe7, 0xea, 0x79, 0xe1, 0x99, 0x62, 0xfc, 0x5e, 0x84, 0x6a, 0x7e, 0xef, 0xd0, 0x7e, 0xbe, 0x09, - 0x6a, 0x73, 0x6b, 0x21, 0xe5, 0x6e, 0x7a, 0x6b, 0x92, 0x0e, 0x99, 0xd9, 0x96, 0xa9, 0xcd, 0xff, - 0x2f, 0x04, 0xb4, 0xb3, 0xc3, 0x23, 0x76, 0xf0, 0x0c, 0x2a, 0xd4, 0xb3, 0x02, 0x7a, 0xe5, 0x33, - 0xf9, 0x0d, 0xd1, 0x7c, 0xeb, 0xbb, 0x60, 0xf6, 0x65, 0x24, 0x4e, 0x31, 0xf4, 0x9f, 0x0b, 0x50, - 0x49, 0xcc, 0xff, 0x05, 0xff, 0x37, 0x50, 0x0b, 0x48, 0x68, 0x13, 0x8f, 0x39, 0xc9, 0x99, 0x4d, - 0xba, 0xdc, 0x5e, 0x3d, 0x11, 0x93, 0xab, 0x07, 0xac, 0x97, 0x42, 0x62, 0x2d, 0x83, 0x17, 0xff, - 0x5c, 0x7a, 0x17, 0x6a, 0x0b, 0x6e, 0x68, 0x1b, 0x20, 0x73, 0x94, 0xc3, 0x9b, 0xb3, 0xdc, 0xec, - 0x7a, 0x32, 0xd7, 0xad, 0x19, 0x6c, 0x3b, 0xfe, 0x32, 0x9a, 0xad, 0xaa, 0xf8, 0x2a, 0xa2, 0xbd, - 0xf8, 0xa1, 0xa7, 0xbc, 0x6e, 0x5f, 0x3a, 0xec, 0x2a, 0x1a, 0x9b, 0xb6, 0xef, 0x36, 0x44, 0xcc, - 0x9e, 0xe3, 0x51, 0x16, 0x46, 0xf1, 0xcc, 0xf1, 0xeb, 0xd8, 0xc8, 0xe0, 0xf6, 0xc4, 0x27, 0xef, - 0x25, 0xf1, 0xf6, 0x2e, 0xf3, 0x9f, 0xe0, 0xe3, 0x32, 0x7f, 0xf8, 0xf4, 0xcf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0xfc, 0xd7, 0x46, 0xa8, 0x0b, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go 
b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go deleted file mode 100644 index 38faa9fdf1..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/resource/v1/resource.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Resource information. -type Resource struct { - // Type identifier for the resource. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Set of labels that describe the resource. - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_584700775a2fc762, []int{0} -} - -func (m *Resource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Resource.Unmarshal(m, b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return xxx_messageInfo_Resource.Size(m) -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Resource) GetLabels() map[string]string { - if m != nil { - return m.Labels - } - return nil -} - -func init() { - proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") -} - -func init() { - proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) -} - -var fileDescriptor_584700775a2fc762 = []byte{ - // 234 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, - 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, - 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, - 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, - 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, - 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, - 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 
0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, - 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, - 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, - 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf, - 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19, - 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5, - 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99, - 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go deleted file mode 100644 index 4de05355a4..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go +++ /dev/null @@ -1,1543 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/trace/v1/trace.proto - -package v1 - -import ( - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type Span_SpanKind int32 - -const ( - // Unspecified. - Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - Span_SERVER Span_SpanKind = 1 - // Indicates that the span covers the client-side wrapper around an RPC or - // other remote request. - Span_CLIENT Span_SpanKind = 2 -) - -var Span_SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "SERVER", - 2: "CLIENT", -} - -var Span_SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "SERVER": 1, - "CLIENT": 2, -} - -func (x Span_SpanKind) String() string { - return proto.EnumName(Span_SpanKind_name, int32(x)) -} - -func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0} -} - -// Indicates whether the message was sent or received. -type Span_TimeEvent_MessageEvent_Type int32 - -const ( - // Unknown event type. - Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 - // Indicates a sent message. - Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 - // Indicates a received message. 
- Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 -) - -var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "SENT", - 2: "RECEIVED", -} - -var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "SENT": 1, - "RECEIVED": 2, -} - -func (x Span_TimeEvent_MessageEvent_Type) String() string { - return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) -} - -func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} -} - -// The relationship of the current span relative to the linked span: child, -// parent, or unspecified. -type Span_Link_Type int32 - -const ( - // The relationship of the two spans is unknown, or known but other - // than parent-child. - Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 - // The linked span is a child of the current span. - Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 - // The linked span is a parent of the current span. - Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 -) - -var Span_Link_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "CHILD_LINKED_SPAN", - 2: "PARENT_LINKED_SPAN", -} - -var Span_Link_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "CHILD_LINKED_SPAN": 1, - "PARENT_LINKED_SPAN": 2, -} - -func (x Span_Link_Type) String() string { - return proto.EnumName(Span_Link_Type_name, int32(x)) -} - -func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} -} - -// A span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Spans may also be linked to other spans -// from the same or different trace. And form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. -// -// The next id is 17. -// TODO(bdrutu): Add an example. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. - // - // This field is semantically required. Receiver should generate new - // random trace_id if empty or invalid trace_id was received. - // - // This field is required. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. - // - // This field is semantically required. Receiver should generate new - // random span_id if empty or invalid span_id was received. - // - // This field is required. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The Tracestate on the span. - Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` - // A description of the span's operation. 
- // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // When null or empty string received - receiver may use string "name" - // as a replacement. There might be smarted algorithms implemented by - // receiver to fix the empty span name. - // - // This field is required. - Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // The start time of the span. On the client side, this is the time kept by - // the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // - // This field is semantically required. When not set on receive - - // receiver should set it to the value of end_time field if it was - // set. Or to the current time if neither was set. It is important to - // keep end_time > start_time for consistency. - // - // This field is required. - StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // The end time of the span. On the client side, this is the time kept by - // the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // - // This field is semantically required. When not set on receive - - // receiver should set it to start_time value. It is important to - // keep end_time > start_time for consistency. - // - // This field is required. - EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // A set of attributes on the span. - Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` - // A stack trace captured at the start of the span. - StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` - // The included time events. - TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` - // The included links. - Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` - // An optional final status for this span. Semantically when Status - // wasn't set it is means span ended without errors and assume - // Status.Ok (code = 0). - Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` - // An optional resource that is associated with this span. If not set, this span - // should be part of a batch that does include the resource information, unless resource - // information is unknown. - Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"` - // A highly recommended but not required flag that identifies when a - // trace crosses a process boundary. 
True when the parent_span belongs - // to the same process as the current span. This flag is most commonly - // used to indicate the need to adjust time as clocks in different - // processes may not be synchronized. - SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` - // An optional number of child spans that were generated while this span - // was active. If set, allows an implementation to detect missing child spans. - ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0} -} - -func (m *Span) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span.Unmarshal(m, b) -} -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) -} -func (m *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(m, src) -} -func (m *Span) XXX_Size() int { - return xxx_messageInfo_Span.Size(m) -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) -} - -var xxx_messageInfo_Span proto.InternalMessageInfo - -func (m *Span) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span) GetTracestate() *Span_Tracestate { - if m != nil { - return m.Tracestate - } - return nil -} - -func (m *Span) GetParentSpanId() []byte { - if m != nil { - return m.ParentSpanId - } - return nil -} - -func (m *Span) GetName() *TruncatableString { - if m != nil { - return m.Name - } - return nil -} - -func (m *Span) GetKind() Span_SpanKind { - if m != nil { - return m.Kind - } - return Span_SPAN_KIND_UNSPECIFIED -} - -func (m *Span) GetStartTime() *timestamp.Timestamp { - if m != nil { - return m.StartTime - } - return nil -} - -func (m *Span) GetEndTime() *timestamp.Timestamp { - if m != nil { - return m.EndTime - } - return nil -} - -func (m *Span) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span) GetStackTrace() *StackTrace { - if m != nil { - return m.StackTrace - } - return nil -} - -func (m *Span) GetTimeEvents() *Span_TimeEvents { - if m != nil { - return m.TimeEvents - } - return nil -} - -func (m *Span) GetLinks() *Span_Links { - if m != nil { - return m.Links - } - return nil -} - -func (m *Span) GetStatus() *Status { - if m != nil { - return m.Status - } - return nil -} - -func (m *Span) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { - if m != nil { - return m.SameProcessAsParentSpan - } - return nil -} - -func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { - if m != nil { - return m.ChildSpanCount - } - return nil -} - -// This field conveys information about request position in multiple distributed tracing graphs. -// It is a list of Tracestate.Entry with a maximum of 32 members in the list. 
-// -// See the https://github.com/w3c/distributed-tracing for more details about this field. -type Span_Tracestate struct { - // A list of entries that represent the Tracestate. - Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} } -func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) } -func (*Span_Tracestate) ProtoMessage() {} -func (*Span_Tracestate) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0} -} - -func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b) -} -func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic) -} -func (m *Span_Tracestate) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Tracestate.Merge(m, src) -} -func (m *Span_Tracestate) XXX_Size() int { - return xxx_messageInfo_Span_Tracestate.Size(m) -} -func (m *Span_Tracestate) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Tracestate.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo - -func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry { - if m != nil { - return m.Entries - } - return nil -} - -type Span_Tracestate_Entry struct { - // The key must begin with a lowercase letter, and can only contain - // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes - // '-', asterisks '*', and forward slashes '/'. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // The value is opaque string up to 256 characters printable ASCII - // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. - // Note that this also excludes tabs, newlines, carriage returns, etc. - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } -func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } -func (*Span_Tracestate_Entry) ProtoMessage() {} -func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} -} - -func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) -} -func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) -} -func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) -} -func (m *Span_Tracestate_Entry) XXX_Size() int { - return xxx_messageInfo_Span_Tracestate_Entry.Size(m) -} -func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo - -func (m *Span_Tracestate_Entry) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *Span_Tracestate_Entry) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// A set of attributes, each with a key and a value. 
-type Span_Attributes struct { - // The set of attributes. The value can be a string, an integer, a double - // or the Boolean values `true` or `false`. Note, global attributes like - // server name can be set as tags using resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The number of attributes that were discarded. Attributes can be discarded - // because their keys are too long or because there are too many attributes. - // If this value is 0, then no attributes were dropped. - DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } -func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } -func (*Span_Attributes) ProtoMessage() {} -func (*Span_Attributes) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 1} -} - -func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) -} -func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) -} -func (m *Span_Attributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Attributes.Merge(m, src) -} -func (m *Span_Attributes) XXX_Size() int { - return xxx_messageInfo_Span_Attributes.Size(m) -} -func (m *Span_Attributes) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Attributes.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo - -func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { - if m != nil { - return m.AttributeMap - } - return nil -} - -func (m *Span_Attributes) GetDroppedAttributesCount() int32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// A time-stamped annotation or message event in the Span. -type Span_TimeEvent struct { - // The time the event occurred. - Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` - // A `TimeEvent` can contain either an `Annotation` object or a - // `MessageEvent` object, but not both. 
- // - // Types that are valid to be assigned to Value: - // *Span_TimeEvent_Annotation_ - // *Span_TimeEvent_MessageEvent_ - Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } -func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent) ProtoMessage() {} -func (*Span_TimeEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2} -} - -func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) -} -func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent.Merge(m, src) -} -func (m *Span_TimeEvent) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent.Size(m) -} -func (m *Span_TimeEvent) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo - -func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { - if m != nil { - return m.Time - } - return nil -} - -type isSpan_TimeEvent_Value interface { - isSpan_TimeEvent_Value() -} - -type Span_TimeEvent_Annotation_ struct { - Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` -} - -type Span_TimeEvent_MessageEvent_ struct { - MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` -} - -func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} - -func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} - -func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { - if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { - return x.Annotation - } - return nil -} - -func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { - if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { - return x.MessageEvent - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Span_TimeEvent_Annotation_)(nil), - (*Span_TimeEvent_MessageEvent_)(nil), - } -} - -// A text annotation with a set of attributes. -type Span_TimeEvent_Annotation struct { - // A user-supplied message describing the event. - Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // A set of attributes on the annotation. 
- Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } -func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent_Annotation) ProtoMessage() {} -func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} -} - -func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) -} -func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) -} -func (m *Span_TimeEvent_Annotation) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) -} -func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo - -func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { - if m != nil { - return m.Description - } - return nil -} - -func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// An event describing a message sent/received between Spans. -type Span_TimeEvent_MessageEvent struct { - // The type of MessageEvent. Indicates whether the message was sent or - // received. - Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` - // An identifier for the MessageEvent's message that can be used to match - // SENT and RECEIVED MessageEvents. For example, this field could - // represent a sequence ID for a streaming RPC. It is recommended to be - // unique within a Span. - Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - // The number of uncompressed bytes sent or received. - UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` - // The number of compressed bytes sent or received. If zero, assumed to - // be the same size as uncompressed. 
- CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } -func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} -func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} -} - -func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) -} -func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo - -func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { - if m != nil { - return m.Type - } - return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED -} - -func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { - if m != nil { - return m.UncompressedSize - } - return 0 -} - -func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { - if m != nil { - return m.CompressedSize - } - return 0 -} - -// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation -// on the span, consisting of either user-supplied key-value pairs, or -// details of a message sent/received between Spans. -type Span_TimeEvents struct { - // A collection of `TimeEvent`s. - TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` - // The number of dropped annotations in all the included time events. - // If the value is 0, then no annotations were dropped. - DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` - // The number of dropped message events in all the included time events. - // If the value is 0, then no message events were dropped. 
- DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } -func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvents) ProtoMessage() {} -func (*Span_TimeEvents) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 3} -} - -func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) -} -func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvents.Merge(m, src) -} -func (m *Span_TimeEvents) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvents.Size(m) -} -func (m *Span_TimeEvents) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo - -func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { - if m != nil { - return m.TimeEvent - } - return nil -} - -func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { - if m != nil { - return m.DroppedAnnotationsCount - } - return 0 -} - -func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { - if m != nil { - return m.DroppedMessageEventsCount - } - return 0 -} - -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. -type Span_Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The relationship of the current span relative to the linked span. - Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` - // A set of attributes on the link. 
- Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Link) Reset() { *m = Span_Link{} } -func (m *Span_Link) String() string { return proto.CompactTextString(m) } -func (*Span_Link) ProtoMessage() {} -func (*Span_Link) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 4} -} - -func (m *Span_Link) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Link.Unmarshal(m, b) -} -func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) -} -func (m *Span_Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Link.Merge(m, src) -} -func (m *Span_Link) XXX_Size() int { - return xxx_messageInfo_Span_Link.Size(m) -} -func (m *Span_Link) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Link proto.InternalMessageInfo - -func (m *Span_Link) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span_Link) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span_Link) GetType() Span_Link_Type { - if m != nil { - return m.Type - } - return Span_Link_TYPE_UNSPECIFIED -} - -func (m *Span_Link) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// A collection of links, which are references from this span to a span -// in the same or different trace. -type Span_Links struct { - // A collection of links. - Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` - // The number of dropped links after the maximum size was enforced. If - // this value is 0, then no links were dropped. - DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Links) Reset() { *m = Span_Links{} } -func (m *Span_Links) String() string { return proto.CompactTextString(m) } -func (*Span_Links) ProtoMessage() {} -func (*Span_Links) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 5} -} - -func (m *Span_Links) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Links.Unmarshal(m, b) -} -func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) -} -func (m *Span_Links) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Links.Merge(m, src) -} -func (m *Span_Links) XXX_Size() int { - return xxx_messageInfo_Span_Links.Size(m) -} -func (m *Span_Links) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Links.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Links proto.InternalMessageInfo - -func (m *Span_Links) GetLink() []*Span_Link { - if m != nil { - return m.Link - } - return nil -} - -func (m *Span_Links) GetDroppedLinksCount() int32 { - if m != nil { - return m.DroppedLinksCount - } - return 0 -} - -// The `Status` type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. 
This proto's fields -// are a subset of those of -// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), -// which is used by [gRPC](https://github.com/grpc). -type Status struct { - // The status code. This is optional field. It is safe to assume 0 (OK) - // when not set. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // A developer-facing error message, which should be in English. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{1} -} - -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -// The value of an Attribute. -type AttributeValue struct { - // The type of the value. - // - // Types that are valid to be assigned to Value: - // *AttributeValue_StringValue - // *AttributeValue_IntValue - // *AttributeValue_BoolValue - // *AttributeValue_DoubleValue - Value isAttributeValue_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AttributeValue) Reset() { *m = AttributeValue{} } -func (m *AttributeValue) String() string { return proto.CompactTextString(m) } -func (*AttributeValue) ProtoMessage() {} -func (*AttributeValue) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{2} -} - -func (m *AttributeValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AttributeValue.Unmarshal(m, b) -} -func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) -} -func (m *AttributeValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributeValue.Merge(m, src) -} -func (m *AttributeValue) XXX_Size() int { - return xxx_messageInfo_AttributeValue.Size(m) -} -func (m *AttributeValue) XXX_DiscardUnknown() { - xxx_messageInfo_AttributeValue.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributeValue proto.InternalMessageInfo - -type isAttributeValue_Value interface { - isAttributeValue_Value() -} - -type AttributeValue_StringValue struct { - StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type AttributeValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` -} - -type AttributeValue_BoolValue struct { - BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type 
AttributeValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -func (*AttributeValue_StringValue) isAttributeValue_Value() {} - -func (*AttributeValue_IntValue) isAttributeValue_Value() {} - -func (*AttributeValue_BoolValue) isAttributeValue_Value() {} - -func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} - -func (m *AttributeValue) GetValue() isAttributeValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *AttributeValue) GetStringValue() *TruncatableString { - if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { - return x.StringValue - } - return nil -} - -func (m *AttributeValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *AttributeValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *AttributeValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AttributeValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AttributeValue_StringValue)(nil), - (*AttributeValue_IntValue)(nil), - (*AttributeValue_BoolValue)(nil), - (*AttributeValue_DoubleValue)(nil), - } -} - -// The call stack which originated this span. -type StackTrace struct { - // Stack frames in this stack trace. - StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` - // The hash ID is used to conserve network bandwidth for duplicate - // stack traces within a single trace. - // - // Often multiple spans will have identical stack traces. - // The first occurrence of a stack trace should contain both - // `stack_frames` and a value in `stack_trace_hash_id`. - // - // Subsequent spans within the same request can refer - // to that stack trace by setting only `stack_trace_hash_id`. - // - // TODO: describe how to deal with the case where stack_trace_hash_id is - // zero because it was not set. 
- StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace) Reset() { *m = StackTrace{} } -func (m *StackTrace) String() string { return proto.CompactTextString(m) } -func (*StackTrace) ProtoMessage() {} -func (*StackTrace) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3} -} - -func (m *StackTrace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace.Unmarshal(m, b) -} -func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) -} -func (m *StackTrace) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace.Merge(m, src) -} -func (m *StackTrace) XXX_Size() int { - return xxx_messageInfo_StackTrace.Size(m) -} -func (m *StackTrace) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace proto.InternalMessageInfo - -func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { - if m != nil { - return m.StackFrames - } - return nil -} - -func (m *StackTrace) GetStackTraceHashId() uint64 { - if m != nil { - return m.StackTraceHashId - } - return 0 -} - -// A single stack frame in a stack trace. -type StackTrace_StackFrame struct { - // The fully-qualified name that uniquely identifies the function or - // method that is active in this frame. - FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` - // An un-mangled function name, if `function_name` is - // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can - // be fully qualified. - OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` - // The name of the source file where the function call appears. - FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` - // The line number in `file_name` where the function call appears. - LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` - // The column number where the function call appears, if available. - // This is important in JavaScript because of its anonymous functions. - ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` - // The binary module from where the code was loaded. - LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` - // The version of the deployed source code. 
- SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } -func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } -func (*StackTrace_StackFrame) ProtoMessage() {} -func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3, 0} -} - -func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) -} -func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) -} -func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) -} -func (m *StackTrace_StackFrame) XXX_Size() int { - return xxx_messageInfo_StackTrace_StackFrame.Size(m) -} -func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo - -func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { - if m != nil { - return m.FunctionName - } - return nil -} - -func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { - if m != nil { - return m.OriginalFunctionName - } - return nil -} - -func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { - if m != nil { - return m.FileName - } - return nil -} - -func (m *StackTrace_StackFrame) GetLineNumber() int64 { - if m != nil { - return m.LineNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetColumnNumber() int64 { - if m != nil { - return m.ColumnNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetLoadModule() *Module { - if m != nil { - return m.LoadModule - } - return nil -} - -func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { - if m != nil { - return m.SourceVersion - } - return nil -} - -// A collection of stack frames, which can be truncated. -type StackTrace_StackFrames struct { - // Stack frames in this call stack. - Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` - // The number of stack frames that were dropped because there - // were too many stack frames. - // If this value is 0, then no stack frames were dropped. 
- DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } -func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } -func (*StackTrace_StackFrames) ProtoMessage() {} -func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3, 1} -} - -func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) -} -func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) -} -func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) -} -func (m *StackTrace_StackFrames) XXX_Size() int { - return xxx_messageInfo_StackTrace_StackFrames.Size(m) -} -func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo - -func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { - if m != nil { - return m.Frame - } - return nil -} - -func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { - if m != nil { - return m.DroppedFramesCount - } - return 0 -} - -// A description of a binary module. -type Module struct { - // TODO: document the meaning of this field. - // For example: main binary, kernel modules, and dynamic libraries - // such as libc.so, sharedlib.so. - Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` - // A unique identifier for the module, usually a hash of its - // contents. - BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Module) Reset() { *m = Module{} } -func (m *Module) String() string { return proto.CompactTextString(m) } -func (*Module) ProtoMessage() {} -func (*Module) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{4} -} - -func (m *Module) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Module.Unmarshal(m, b) -} -func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Module.Marshal(b, m, deterministic) -} -func (m *Module) XXX_Merge(src proto.Message) { - xxx_messageInfo_Module.Merge(m, src) -} -func (m *Module) XXX_Size() int { - return xxx_messageInfo_Module.Size(m) -} -func (m *Module) XXX_DiscardUnknown() { - xxx_messageInfo_Module.DiscardUnknown(m) -} - -var xxx_messageInfo_Module proto.InternalMessageInfo - -func (m *Module) GetModule() *TruncatableString { - if m != nil { - return m.Module - } - return nil -} - -func (m *Module) GetBuildId() *TruncatableString { - if m != nil { - return m.BuildId - } - return nil -} - -// A string that might be shortened to a specified length. -type TruncatableString struct { - // The shortened string. For example, if the original string was 500 bytes long and - // the limit of the string was 128 bytes, then this value contains the first 128 - // bytes of the 500-byte string. 
Note that truncation always happens on a - // character boundary, to ensure that a truncated string is still valid UTF-8. - // Because it may contain multi-byte characters, the size of the truncated string - // may be less than the truncation limit. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - // The number of bytes removed from the original string. If this - // value is 0, then the string was not shortened. - TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TruncatableString) Reset() { *m = TruncatableString{} } -func (m *TruncatableString) String() string { return proto.CompactTextString(m) } -func (*TruncatableString) ProtoMessage() {} -func (*TruncatableString) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{5} -} - -func (m *TruncatableString) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TruncatableString.Unmarshal(m, b) -} -func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) -} -func (m *TruncatableString) XXX_Merge(src proto.Message) { - xxx_messageInfo_TruncatableString.Merge(m, src) -} -func (m *TruncatableString) XXX_Size() int { - return xxx_messageInfo_TruncatableString.Size(m) -} -func (m *TruncatableString) XXX_DiscardUnknown() { - xxx_messageInfo_TruncatableString.DiscardUnknown(m) -} - -var xxx_messageInfo_TruncatableString proto.InternalMessageInfo - -func (m *TruncatableString) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *TruncatableString) GetTruncatedByteCount() int32 { - if m != nil { - return m.TruncatedByteCount - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) - proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) - proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) - proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") - proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") - proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") - proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") - proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") - proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") - proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") - proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") - proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") - proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") - proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") - proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") - proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") - 
proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") - proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") - proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames") - proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") - proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") -} - -func init() { - proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) -} - -var fileDescriptor_8ea38bbb821bf584 = []byte{ - // 1557 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xeb, 0x52, 0x1b, 0x47, - 0x16, 0x66, 0x74, 0xd7, 0x91, 0x90, 0x45, 0x1b, 0xdb, 0x83, 0xd6, 0xbb, 0x66, 0x65, 0x7b, 0x17, - 0xaf, 0x17, 0x61, 0xb0, 0xd7, 0xe5, 0x6b, 0x79, 0x11, 0x88, 0x48, 0x06, 0x2b, 0x72, 0x4b, 0xa6, - 0x72, 0xa9, 0xd4, 0xd4, 0x48, 0xd3, 0x88, 0x09, 0x52, 0xcf, 0x64, 0xa6, 0x87, 0x14, 0x7e, 0x81, - 0x54, 0x2a, 0xff, 0x52, 0x95, 0xca, 0x0b, 0xe4, 0x47, 0x5e, 0x24, 0x0f, 0x90, 0xca, 0x73, 0xe4, - 0x09, 0xf2, 0x27, 0xd5, 0xdd, 0x73, 0x13, 0xd8, 0xa0, 0xc8, 0x7f, 0xa8, 0x9e, 0xee, 0xf3, 0x7d, - 0x7d, 0x4e, 0x9f, 0x2b, 0x82, 0xdb, 0x96, 0x4d, 0xe8, 0x80, 0x50, 0xd7, 0x73, 0xd7, 0x6c, 0xc7, - 0x62, 0xd6, 0x1a, 0x73, 0xf4, 0x01, 0x59, 0x3b, 0x5e, 0x97, 0x8b, 0x9a, 0xd8, 0x44, 0x4b, 0x91, - 0x98, 0xdc, 0xa9, 0xc9, 0xd3, 0xe3, 0xf5, 0xca, 0xdd, 0x33, 0x0c, 0x0e, 0x71, 0x2d, 0xcf, 0x91, - 0x24, 0xc1, 0x5a, 0xa2, 0x2a, 0x37, 0x86, 0x96, 0x35, 0x1c, 0x11, 0x29, 0xd8, 0xf7, 0x0e, 0xd6, - 0x98, 0x39, 0x26, 0x2e, 0xd3, 0xc7, 0xb6, 0x2f, 0xf0, 0x8f, 0xd3, 0x02, 0x5f, 0x3b, 0xba, 0x6d, - 0x13, 0xc7, 0xbf, 0xb6, 0xfa, 0xcb, 0x15, 0x48, 0x75, 0x6d, 0x9d, 0xa2, 0x25, 0xc8, 0x09, 0x15, - 0x34, 0xd3, 0x50, 0x95, 0x65, 0x65, 0xa5, 0x88, 0xb3, 0xe2, 0xbb, 0x65, 0xa0, 0x6b, 0x90, 0x75, - 0x6d, 0x9d, 0xf2, 0x93, 0x84, 0x38, 0xc9, 0xf0, 0xcf, 0x96, 0x81, 0x5e, 0x02, 0x08, 0x19, 0x97, - 0xe9, 0x8c, 0xa8, 0x97, 0x96, 0x95, 0x95, 0xc2, 0xc6, 0x7f, 0x6a, 0xef, 0x35, 0xad, 0xc6, 0x2f, - 0xaa, 0xf5, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x2d, 0x28, 0xd9, 0xba, 0x43, 0x28, 0xd3, 0x82, 0xbb, - 0x92, 0xe2, 0xae, 0xa2, 0xdc, 0xed, 0xca, 0x1b, 0xff, 0x0f, 0x29, 0xaa, 0x8f, 0x89, 0x9a, 0x12, - 0x77, 0xfd, 0xf7, 0x9c, 0xbb, 0x7a, 0x8e, 0x47, 0x07, 0x3a, 0xd3, 0xfb, 0x23, 0xd2, 0x65, 0x8e, - 0x49, 0x87, 0x58, 0x20, 0xd1, 0x33, 0x48, 0x1d, 0x99, 0xd4, 0x50, 0x4b, 0xcb, 0xca, 0x4a, 0x69, - 0x63, 0xe5, 0x22, 0x6d, 0xf9, 0x9f, 0x5d, 0x93, 0x1a, 0x58, 0xa0, 0xd0, 0x63, 0x00, 0x97, 0xe9, - 0x0e, 0xd3, 0xf8, 0x3b, 0xab, 0x69, 0xa1, 0x45, 0xa5, 0x26, 0xdf, 0xb8, 0x16, 0xbc, 0x71, 0xad, - 0x17, 0x38, 0x01, 0xe7, 0x85, 0x34, 0xff, 0x46, 0xff, 0x83, 0x1c, 0xa1, 0x86, 0x04, 0x66, 0x2e, - 0x04, 0x66, 0x09, 0x35, 0x04, 0xec, 0x25, 0x80, 0xce, 0x98, 0x63, 0xf6, 0x3d, 0x46, 0x5c, 0x35, - 0x3b, 0xdd, 0x1b, 0x6f, 0x86, 0x08, 0x1c, 0x43, 0xa3, 0x1d, 0x28, 0xb8, 0x4c, 0x1f, 0x1c, 0x69, - 0x42, 0x5a, 0xcd, 0x09, 0xb2, 0xdb, 0xe7, 0x91, 0x71, 0x69, 0xe1, 0x30, 0x0c, 0x6e, 0xb8, 0x46, - 0xbb, 0x50, 0xe0, 0x66, 0x68, 0xe4, 0x98, 0x50, 0xe6, 0xaa, 0xf9, 0x29, 0x1d, 0x6f, 0x8e, 0x49, - 0x43, 0x20, 0x30, 0xb0, 0x70, 0x8d, 0x9e, 0x42, 0x7a, 0x64, 0xd2, 0x23, 0x57, 0x85, 0x8b, 0xd5, - 0xe1, 0x34, 0x7b, 0x5c, 0x18, 0x4b, 0x0c, 0x7a, 0x0c, 0x19, 0x1e, 0x3e, 0x9e, 0xab, 0x16, 0x04, - 0xfa, 0x9f, 0xe7, 0x1b, 0xc3, 0x3c, 0x17, 0xfb, 0x00, 0x54, 0x87, 0x5c, 0x90, 0x4c, 0x6a, 0x59, - 0x80, 0xff, 0x75, 0x16, 0x1c, 0xa6, 
0xdb, 0xf1, 0x7a, 0x0d, 0xfb, 0x6b, 0x1c, 0xe2, 0xd0, 0x27, - 0xf0, 0x37, 0x57, 0x1f, 0x13, 0xcd, 0x76, 0xac, 0x01, 0x71, 0x5d, 0x4d, 0x77, 0xb5, 0x58, 0x10, - 0xab, 0xc5, 0xf7, 0xb8, 0xb9, 0x6e, 0x59, 0xa3, 0x7d, 0x7d, 0xe4, 0x11, 0x7c, 0x8d, 0xc3, 0x3b, - 0x12, 0xbd, 0xe9, 0x76, 0xc2, 0x50, 0x47, 0x3b, 0x50, 0x1e, 0x1c, 0x9a, 0x23, 0x43, 0x66, 0xc3, - 0xc0, 0xf2, 0x28, 0x53, 0xe7, 0x05, 0xdd, 0xf5, 0x33, 0x74, 0x6f, 0x5a, 0x94, 0xdd, 0xdf, 0x90, - 0x84, 0x25, 0x81, 0xe2, 0x14, 0x5b, 0x1c, 0x53, 0xf9, 0x56, 0x01, 0x88, 0x32, 0x0e, 0xbd, 0x84, - 0x2c, 0xa1, 0xcc, 0x31, 0x89, 0xab, 0x2a, 0xcb, 0xc9, 0x95, 0xc2, 0xc6, 0xbd, 0xe9, 0xd3, 0xb5, - 0xd6, 0xa0, 0xcc, 0x39, 0xc1, 0x01, 0x41, 0x65, 0x0d, 0xd2, 0x62, 0x07, 0x95, 0x21, 0x79, 0x44, - 0x4e, 0x44, 0xd5, 0xc8, 0x63, 0xbe, 0x44, 0x8b, 0x90, 0x3e, 0xe6, 0xea, 0x88, 0x7a, 0x91, 0xc7, - 0xf2, 0xa3, 0xf2, 0x43, 0x02, 0x20, 0x8a, 0x4c, 0xa4, 0xc3, 0x7c, 0x18, 0x9b, 0xda, 0x58, 0xb7, - 0x7d, 0x8d, 0x9e, 0x4d, 0x1f, 0xdc, 0xd1, 0xf2, 0x95, 0x6e, 0x4b, 0xed, 0x8a, 0x7a, 0x6c, 0x0b, - 0x3d, 0x02, 0xd5, 0x70, 0x2c, 0xdb, 0x26, 0x86, 0x16, 0xa5, 0x81, 0xff, 0x9a, 0x5c, 0xb5, 0x34, - 0xbe, 0xea, 0x9f, 0x47, 0xa4, 0xf2, 0xdd, 0xbe, 0x84, 0x85, 0x33, 0xe4, 0xef, 0x30, 0xf4, 0x45, - 0xdc, 0xd0, 0xc2, 0xc6, 0x9d, 0x73, 0x74, 0x0f, 0xe9, 0xa4, 0xa3, 0x24, 0xee, 0x49, 0xe2, 0x91, - 0x52, 0xf9, 0x29, 0x0d, 0xf9, 0x30, 0x39, 0x50, 0x0d, 0x52, 0xa2, 0x46, 0x28, 0x17, 0xd6, 0x08, - 0x21, 0x87, 0xf6, 0x01, 0x74, 0x4a, 0x2d, 0xa6, 0x33, 0xd3, 0xa2, 0xbe, 0x1e, 0x0f, 0xa6, 0xce, - 0xc5, 0xda, 0x66, 0x88, 0x6d, 0xce, 0xe1, 0x18, 0x13, 0xfa, 0x02, 0xe6, 0xc7, 0xc4, 0x75, 0xf5, - 0xa1, 0x9f, 0xe7, 0xa2, 0x1e, 0x17, 0x36, 0x1e, 0x4e, 0x4f, 0xfd, 0x4a, 0xc2, 0xc5, 0x47, 0x73, - 0x0e, 0x17, 0xc7, 0xb1, 0xef, 0xca, 0xcf, 0x0a, 0x40, 0x74, 0x37, 0x6a, 0x43, 0xc1, 0x20, 0xee, - 0xc0, 0x31, 0x6d, 0x61, 0x86, 0x32, 0x43, 0x7d, 0x8f, 0x13, 0x9c, 0x2a, 0x9b, 0x89, 0x0f, 0x29, - 0x9b, 0x95, 0x3f, 0x14, 0x28, 0xc6, 0x6d, 0x41, 0x1f, 0x43, 0x8a, 0x9d, 0xd8, 0xd2, 0x45, 0xa5, - 0x8d, 0xa7, 0xb3, 0xbd, 0x48, 0xad, 0x77, 0x62, 0x13, 0x2c, 0x88, 0x50, 0x09, 0x12, 0x7e, 0x73, - 0x4d, 0xe1, 0x84, 0x69, 0xa0, 0xbb, 0xb0, 0xe0, 0xd1, 0x81, 0x35, 0xb6, 0x1d, 0xe2, 0xba, 0xc4, - 0xd0, 0x5c, 0xf3, 0x2d, 0x11, 0xef, 0x9f, 0xc2, 0xe5, 0xf8, 0x41, 0xd7, 0x7c, 0x4b, 0xd0, 0xbf, - 0xe1, 0xd2, 0x69, 0xd1, 0x94, 0x10, 0x2d, 0x4d, 0x0a, 0x56, 0x1f, 0x40, 0x8a, 0xdf, 0x89, 0x16, - 0xa1, 0xdc, 0xfb, 0xb4, 0xd3, 0xd0, 0xde, 0xb4, 0xbb, 0x9d, 0xc6, 0x56, 0x6b, 0xa7, 0xd5, 0xd8, - 0x2e, 0xcf, 0xa1, 0x1c, 0xa4, 0xba, 0x8d, 0x76, 0xaf, 0xac, 0xa0, 0x22, 0xe4, 0x70, 0x63, 0xab, - 0xd1, 0xda, 0x6f, 0x6c, 0x97, 0x13, 0xf5, 0xac, 0x1f, 0xe2, 0x95, 0xdf, 0x78, 0x29, 0x89, 0xea, - 0x76, 0x13, 0x20, 0x6a, 0x02, 0x7e, 0xee, 0xde, 0x99, 0xfa, 0x29, 0x70, 0x3e, 0x6c, 0x01, 0xe8, - 0x09, 0x2c, 0x85, 0x59, 0x1a, 0x46, 0xc4, 0x64, 0x9a, 0x5e, 0x0b, 0xd2, 0x34, 0x3a, 0x17, 0x79, - 0x8a, 0x5e, 0xc0, 0xf5, 0x00, 0x3b, 0x11, 0xad, 0x01, 0x3c, 0x29, 0xe0, 0x01, 0x7f, 0xfc, 0xfd, - 0xfd, 0x44, 0xff, 0x3e, 0x01, 0x29, 0xde, 0x52, 0x66, 0x1a, 0x80, 0x9e, 0xfb, 0x81, 0x90, 0x14, - 0x81, 0x70, 0x67, 0x9a, 0xd6, 0x15, 0x77, 0xfb, 0x64, 0x90, 0xa6, 0x3e, 0x24, 0x48, 0xab, 0xbb, - 0xe7, 0x3a, 0xf7, 0x0a, 0x2c, 0x6c, 0x35, 0x5b, 0x7b, 0xdb, 0xda, 0x5e, 0xab, 0xbd, 0xdb, 0xd8, - 0xd6, 0xba, 0x9d, 0xcd, 0x76, 0x59, 0x41, 0x57, 0x01, 0x75, 0x36, 0x71, 0xa3, 0xdd, 0x9b, 0xd8, - 0x4f, 0x54, 0xbe, 0x82, 0xb4, 0x68, 0xb3, 0xe8, 0x11, 0xa4, 0x78, 0xa3, 0xf5, 0xdd, 0x7b, 0x6b, - 0x1a, 0x03, 0xb1, 0x40, 0xa0, 0x1a, 0x5c, 0x0e, 0x1c, 0x23, 
0x5a, 0xf5, 0x84, 0x3b, 0x17, 0xfc, - 0x23, 0x71, 0x89, 0xf0, 0x43, 0xf5, 0x39, 0xe4, 0x82, 0x59, 0x0b, 0x2d, 0xc1, 0x15, 0xae, 0x88, - 0xb6, 0xdb, 0x6a, 0x6f, 0x9f, 0x32, 0x04, 0x20, 0xd3, 0x6d, 0xe0, 0xfd, 0x06, 0x2e, 0x2b, 0x7c, - 0xbd, 0xb5, 0xd7, 0xe2, 0x31, 0x9b, 0xa8, 0x3e, 0x84, 0x8c, 0xec, 0xef, 0x08, 0x41, 0x6a, 0x60, - 0x19, 0x32, 0x39, 0xd3, 0x58, 0xac, 0x91, 0x0a, 0x59, 0x3f, 0x3a, 0xfc, 0x8e, 0x14, 0x7c, 0x56, - 0x7f, 0x55, 0xa0, 0x34, 0x59, 0x99, 0xd1, 0x6b, 0x28, 0xba, 0xa2, 0xa2, 0x68, 0xb2, 0xb4, 0xcf, - 0x50, 0x8b, 0x9a, 0x73, 0xb8, 0x20, 0x39, 0x24, 0xe5, 0xdf, 0x21, 0x6f, 0x52, 0xa6, 0x45, 0xad, - 0x22, 0xd9, 0x9c, 0xc3, 0x39, 0x93, 0x32, 0x79, 0x7c, 0x03, 0xa0, 0x6f, 0x59, 0x23, 0xff, 0x9c, - 0x07, 0x53, 0xae, 0x39, 0x87, 0xf3, 0xfd, 0x60, 0x4c, 0x40, 0x37, 0xa1, 0x68, 0x58, 0x5e, 0x7f, - 0x44, 0x7c, 0x11, 0x1e, 0x2a, 0x0a, 0xbf, 0x44, 0xee, 0x0a, 0xa1, 0x30, 0x51, 0xab, 0xdf, 0x65, - 0x00, 0xa2, 0xc9, 0x0d, 0xf5, 0xb8, 0x3d, 0x7c, 0xea, 0x3b, 0x70, 0xf4, 0xb1, 0x68, 0xfc, 0xdc, - 0x9e, 0xf5, 0xa9, 0xc6, 0x3e, 0xb9, 0xdc, 0x11, 0x40, 0x2c, 0x87, 0x47, 0xf9, 0x81, 0x56, 0xe1, - 0x72, 0x6c, 0x96, 0xd4, 0x0e, 0x75, 0xf7, 0x50, 0x0b, 0x6b, 0x58, 0x39, 0x1a, 0x16, 0x9b, 0xba, - 0x7b, 0xd8, 0x32, 0x2a, 0xbf, 0x27, 0x7d, 0x9d, 0x04, 0x1c, 0xbd, 0x86, 0xf9, 0x03, 0x8f, 0x0e, - 0x78, 0x22, 0x6b, 0x62, 0xa0, 0x9f, 0xa5, 0xe0, 0x17, 0x03, 0x8a, 0x36, 0xa7, 0xec, 0xc3, 0x55, - 0xcb, 0x31, 0x87, 0x26, 0xd5, 0x47, 0xda, 0x24, 0x77, 0x62, 0x06, 0xee, 0xc5, 0x80, 0x6b, 0x27, - 0x7e, 0x47, 0x0b, 0xf2, 0x07, 0xe6, 0x88, 0x48, 0xda, 0xe4, 0x0c, 0xb4, 0x39, 0x0e, 0x17, 0x54, - 0x37, 0xa0, 0x30, 0x32, 0x29, 0xd1, 0xa8, 0x37, 0xee, 0x13, 0x47, 0x78, 0x34, 0x89, 0x81, 0x6f, - 0xb5, 0xc5, 0x0e, 0xba, 0x09, 0xf3, 0x03, 0x6b, 0xe4, 0x8d, 0x69, 0x20, 0x92, 0x16, 0x22, 0x45, - 0xb9, 0xe9, 0x0b, 0xd5, 0xa1, 0x30, 0xb2, 0x74, 0x43, 0x1b, 0x5b, 0x86, 0x37, 0x0a, 0xfe, 0xaf, - 0x38, 0x6f, 0x08, 0x7e, 0x25, 0x04, 0x31, 0x70, 0x94, 0x5c, 0xa3, 0x2e, 0x94, 0xe4, 0x38, 0xab, - 0x1d, 0x13, 0xc7, 0xe5, 0xdd, 0x37, 0x3b, 0x83, 0x65, 0xf3, 0x92, 0x63, 0x5f, 0x52, 0x54, 0xbe, - 0x51, 0xa0, 0x10, 0x8b, 0x1d, 0xb4, 0x03, 0x69, 0x11, 0x7e, 0xd3, 0x8c, 0x9d, 0xef, 0x8a, 0x3e, - 0x2c, 0xe1, 0xe8, 0x1e, 0x2c, 0x06, 0x65, 0x45, 0x86, 0xf3, 0x44, 0x5d, 0x41, 0xfe, 0x99, 0xbc, - 0x54, 0x16, 0x96, 0x1f, 0x15, 0xc8, 0xf8, 0x96, 0x6e, 0x43, 0xc6, 0x7f, 0xa8, 0x59, 0xc2, 0xcd, - 0xc7, 0xa2, 0x8f, 0x20, 0xd7, 0xf7, 0xf8, 0x68, 0xee, 0x87, 0xfb, 0x5f, 0xe5, 0xc9, 0x0a, 0x74, - 0xcb, 0xa8, 0x7e, 0x0e, 0x0b, 0x67, 0x4e, 0xa3, 0xd1, 0x59, 0x89, 0x8d, 0xce, 0xdc, 0x6c, 0x26, - 0x45, 0x89, 0xa1, 0xf5, 0x4f, 0x18, 0x99, 0x34, 0x3b, 0x3c, 0xab, 0x9f, 0x30, 0x22, 0xcc, 0xae, - 0xdb, 0x70, 0xdd, 0xb4, 0xde, 0xaf, 0x57, 0x5d, 0xfe, 0x57, 0xd0, 0xe1, 0x9b, 0x1d, 0xe5, 0xb3, - 0xfa, 0xd0, 0x64, 0x87, 0x5e, 0xbf, 0x36, 0xb0, 0xc6, 0x6b, 0x52, 0x7e, 0xd5, 0xa4, 0x2e, 0x73, - 0xbc, 0x31, 0xa1, 0xb2, 0xdf, 0xae, 0x45, 0x54, 0xab, 0xf2, 0x67, 0x89, 0x21, 0xa1, 0xab, 0xc3, - 0xe8, 0xf7, 0x8d, 0x7e, 0x46, 0x6c, 0xdf, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x94, - 0x45, 0x03, 0x11, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go deleted file mode 100644 index 2ac2d28c47..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,358 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: opencensus/proto/trace/v1/trace_config.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). -type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -var ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", -} - -var ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, -} - -func (x ConstantSampler_ConstantDecision) String() string { - return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) -} - -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{2, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. -type TraceConfig struct { - // The global default sampler used to make decisions on span sampling. - // - // Types that are valid to be assigned to Sampler: - // *TraceConfig_ProbabilitySampler - // *TraceConfig_ConstantSampler - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. - MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. - MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` - // The global default max number of message events per span. - MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` - // The global default max number of link entries per span. 
- MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TraceConfig) Reset() { *m = TraceConfig{} } -func (m *TraceConfig) String() string { return proto.CompactTextString(m) } -func (*TraceConfig) ProtoMessage() {} -func (*TraceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{0} -} - -func (m *TraceConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TraceConfig.Unmarshal(m, b) -} -func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) -} -func (m *TraceConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceConfig.Merge(m, src) -} -func (m *TraceConfig) XXX_Size() int { - return xxx_messageInfo_TraceConfig.Size(m) -} -func (m *TraceConfig) XXX_DiscardUnknown() { - xxx_messageInfo_TraceConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_TraceConfig proto.InternalMessageInfo - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() -} - -type TraceConfig_ProbabilitySampler struct { - ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` -} - -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` -} - -func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { - if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { - return x.ProbabilitySampler - } - return nil -} - -func (m *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { - if m != nil { - return m.MaxNumberOfAttributes - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { - if m != nil { - return m.MaxNumberOfAnnotations - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { - if m != nil { - return m.MaxNumberOfMessageEvents - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfLinks() int64 { - if m != nil { - return m.MaxNumberOfLinks - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*TraceConfig) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*TraceConfig_ProbabilitySampler)(nil), - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } -} - -// Sampler that tries to uniformly sample traces with a given probability. 
-// The probability of sampling a trace is equal to that of the specified probability. -type ProbabilitySampler struct { - // The desired probability of sampling. Must be within [0.0, 1.0]. - SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } -func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } -func (*ProbabilitySampler) ProtoMessage() {} -func (*ProbabilitySampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{1} -} - -func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) -} -func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) -} -func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbabilitySampler.Merge(m, src) -} -func (m *ProbabilitySampler) XXX_Size() int { - return xxx_messageInfo_ProbabilitySampler.Size(m) -} -func (m *ProbabilitySampler) XXX_DiscardUnknown() { - xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo - -func (m *ProbabilitySampler) GetSamplingProbability() float64 { - if m != nil { - return m.SamplingProbability - } - return 0 -} - -// Sampler that always makes a constant decision on span sampling. -type ConstantSampler struct { - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } -func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } -func (*ConstantSampler) ProtoMessage() {} -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{2} -} - -func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) -} -func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) -} -func (m *ConstantSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConstantSampler.Merge(m, src) -} -func (m *ConstantSampler) XXX_Size() int { - return xxx_messageInfo_ConstantSampler.Size(m) -} -func (m *ConstantSampler) XXX_DiscardUnknown() { - xxx_messageInfo_ConstantSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo - -func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { - if m != nil { - return m.Decision - } - return ConstantSampler_ALWAYS_OFF -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - // Rate per second. 
- Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } -func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } -func (*RateLimitingSampler) ProtoMessage() {} -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{3} -} - -func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) -} -func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) -} -func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitingSampler.Merge(m, src) -} -func (m *RateLimitingSampler) XXX_Size() int { - return xxx_messageInfo_RateLimitingSampler.Size(m) -} -func (m *RateLimitingSampler) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo - -func (m *RateLimitingSampler) GetQps() int64 { - if m != nil { - return m.Qps - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) - proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") - proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") - proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") - proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") -} - -func init() { - proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) -} - -var fileDescriptor_5359209b41ff50c5 = []byte{ - // 486 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x4e, 0xdb, 0x40, - 0x10, 0x86, 0x31, 0xa1, 0x50, 0x06, 0x01, 0xee, 0x5a, 0x54, 0x46, 0xe2, 0x80, 0x7c, 0x29, 0xaa, - 0x6a, 0xbb, 0xd0, 0x43, 0x55, 0x55, 0xaa, 0x94, 0x00, 0x51, 0x0f, 0x69, 0x88, 0x0c, 0x52, 0xd4, - 0x5e, 0xdc, 0xb5, 0xd9, 0xb8, 0xab, 0xc6, 0xb3, 0xae, 0x77, 0x1d, 0xd1, 0x77, 0xe9, 0x43, 0xf4, - 0x11, 0xab, 0xac, 0x5d, 0xdb, 0x49, 0x00, 0x71, 0xdb, 0xf9, 0xff, 0xf9, 0x7e, 0xaf, 0xbc, 0x33, - 0xf0, 0x46, 0x64, 0x0c, 0x63, 0x86, 0xb2, 0x90, 0x7e, 0x96, 0x0b, 0x25, 0x7c, 0x95, 0xd3, 0x98, - 0xf9, 0xb3, 0xd3, 0xf2, 0x10, 0xc6, 0x02, 0x27, 0x3c, 0xf1, 0xb4, 0x47, 0x0e, 0x9b, 0xee, 0x52, - 0xf1, 0x74, 0x93, 0x37, 0x3b, 0x75, 0xfe, 0x6c, 0xc0, 0xce, 0xcd, 0xbc, 0x38, 0xd7, 0x00, 0xf9, - 0x0e, 0x56, 0x96, 0x8b, 0x88, 0x46, 0x7c, 0xca, 0xd5, 0xef, 0x50, 0xd2, 0x34, 0x9b, 0xb2, 0xdc, - 0x36, 0x8e, 0x8d, 0x93, 0x9d, 0x33, 0xd7, 0x7b, 0x30, 0xc8, 0x1b, 0x35, 0xd4, 0x75, 0x09, 0x7d, - 0x5e, 0x0b, 0x48, 0xb6, 0xa2, 0x92, 0x31, 0x98, 0xb1, 0x40, 0xa9, 0x28, 0xaa, 0x3a, 0x7e, 0x5d, - 0xc7, 0xbf, 0x7e, 0x24, 0xfe, 0xbc, 0x42, 0x9a, 0xec, 0xfd, 0x78, 0x51, 0x22, 0xb7, 0x70, 0x90, - 0x53, 0xc5, 0xc2, 0x29, 0x4f, 0xb9, 0xe2, 0x98, 0xd4, 0xe9, 0x1d, 0x9d, 0xee, 0x3d, 0x92, 0x1e, - 0x50, 0xc5, 0x06, 0x15, 0xd6, 0x7c, 0xc1, 0xca, 0x57, 0x65, 0xf2, 0x1e, 0xec, 0x94, 0xde, 0x85, - 0x58, 0xa4, 0x11, 0xcb, 0x43, 0x31, 0x09, 0xa9, 0x52, 0x39, 0x8f, 0x0a, 0xc5, 0xa4, 0xbd, 0x71, - 
0x6c, 0x9c, 0x74, 0x82, 0x83, 0x94, 0xde, 0x0d, 0xb5, 0x7d, 0x35, 0xe9, 0xd6, 0x26, 0xf9, 0x00, - 0x87, 0x4b, 0x20, 0xa2, 0x50, 0x54, 0x71, 0x81, 0xd2, 0x7e, 0xa6, 0xc9, 0x97, 0x6d, 0xb2, 0x71, - 0xc9, 0x27, 0x38, 0x5a, 0x44, 0x53, 0x26, 0x25, 0x4d, 0x58, 0xc8, 0x66, 0x0c, 0x95, 0xb4, 0x37, - 0x35, 0x6d, 0xb7, 0xe8, 0x2f, 0x65, 0xc3, 0xa5, 0xf6, 0x89, 0x0b, 0xd6, 0x22, 0x3f, 0xe5, 0xf8, - 0x53, 0xda, 0x5b, 0x1a, 0x33, 0x5b, 0xd8, 0x60, 0xae, 0xf7, 0xb6, 0x61, 0xab, 0xfa, 0x75, 0x4e, - 0x1f, 0xc8, 0xea, 0xc3, 0x92, 0xb7, 0x60, 0xe9, 0x06, 0x8e, 0x49, 0xcb, 0xd5, 0x43, 0x62, 0x04, - 0xf7, 0x59, 0xce, 0x5f, 0x03, 0xf6, 0x97, 0x9e, 0x90, 0x8c, 0xe1, 0xf9, 0x2d, 0x8b, 0xb9, 0xe4, - 0x02, 0x35, 0xba, 0x77, 0xf6, 0xf1, 0xe9, 0x03, 0x50, 0xd7, 0x17, 0x55, 0x44, 0x50, 0x87, 0x39, - 0x17, 0x60, 0x2e, 0xbb, 0x64, 0x0f, 0xa0, 0x3b, 0x18, 0x77, 0xbf, 0x5e, 0x87, 0x57, 0xfd, 0xbe, - 0xb9, 0x46, 0x76, 0x61, 0xfb, 0x7f, 0x3d, 0x34, 0x0d, 0xf2, 0x02, 0x76, 0xab, 0x72, 0xd4, 0x0d, - 0x2e, 0x87, 0x37, 0xe6, 0xba, 0xf3, 0x0a, 0xac, 0x7b, 0xc6, 0x82, 0x98, 0xd0, 0xf9, 0x95, 0x49, - 0x7d, 0xe1, 0x4e, 0x30, 0x3f, 0xf6, 0x66, 0x70, 0xc4, 0xc5, 0xc3, 0x37, 0xef, 0x99, 0xad, 0xfd, - 0x1a, 0xcd, 0xad, 0x91, 0xf1, 0xad, 0x97, 0x70, 0xf5, 0xa3, 0x88, 0xbc, 0x58, 0xa4, 0x7e, 0x49, - 0xb9, 0x1c, 0xa5, 0xca, 0x8b, 0x94, 0x61, 0xf9, 0xea, 0x7e, 0x13, 0xe8, 0x96, 0x1b, 0x9e, 0x30, - 0x74, 0x93, 0x66, 0xd1, 0xa3, 0x4d, 0x2d, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x13, 0xe2, - 0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go deleted file mode 100644 index e9cc202585..0000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ /dev/null @@ -1,1284 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. 
-It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. - -This package produces a different output than the standard "encoding/json" package, -which does not operate correctly on protocol buffers. -*/ -package jsonpb - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - - stpb "github.com/golang/protobuf/ptypes/struct" -) - -const secondInNanos = int64(time.Second / time.Nanosecond) -const maxSecondsInDuration = 315576000000 - -// Marshaler is a configurable object for converting between -// protocol buffer objects and a JSON representation for them. -type Marshaler struct { - // Whether to render enum values as integers, as opposed to string values. - EnumsAsInts bool - - // Whether to render fields with zero values. - EmitDefaults bool - - // A string to indent each level by. The presence of this field will - // also cause a space to appear between the field separator and - // value, and for newlines to be appear between fields and array - // elements. - Indent string - - // Whether to use the original (.proto) name for fields. - OrigName bool - - // A custom URL resolver to use when marshaling Any messages to JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// AnyResolver takes a type URL, present in an Any message, and resolves it into -// an instance of the associated message. -type AnyResolver interface { - Resolve(typeUrl string) (proto.Message, error) -} - -func defaultResolveAny(typeUrl string) (proto.Message, error) { - // Only the part of typeUrl after the last slash is relevant. - mname := typeUrl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] - } - mt := proto.MessageType(mname) - if mt == nil { - return nil, fmt.Errorf("unknown message type %q", mname) - } - return reflect.New(mt.Elem()).Interface().(proto.Message), nil -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should -// also implement JSONPBUnmarshaler so that the custom format can be -// parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize -// the way they are unmarshaled from JSON. Messages that implement this -// should also implement JSONPBMarshaler so that the custom format can be -// produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Marshal marshals a protocol buffer into JSON. -func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { - v := reflect.ValueOf(pb) - if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return errors.New("Marshal called with nil") - } - // Check for unset required fields first. 
- if err := checkRequiredFields(pb); err != nil { - return err - } - writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "", "") -} - -// MarshalToString converts a protocol buffer object to JSON string. -func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { - var buf bytes.Buffer - if err := m.Marshal(&buf, pb); err != nil { - return "", err - } - return buf.String(), nil -} - -type int32Slice []int32 - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(1), - `"-Infinity"`: math.Inf(-1), -} - -// For sorting extensions ids to ensure stable output. -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type wkt interface { - XXX_WellKnownType() string -} - -// marshalObject writes a struct to the Writer. -func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { - if jsm, ok := v.(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(m) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", v, err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if m.Indent != "" { - b, err = json.MarshalIndent(js, indent, m.Indent) - } else { - b, err = json.Marshal(js) - } - if err != nil { - return err - } - } - - out.write(string(b)) - return out.err - } - - s := reflect.ValueOf(v).Elem() - - // Handle well-known types. - if wkt, ok := v.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, ..." - sprop := proto.GetProperties(s.Type()) - return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) - case "Any": - // Any is a bit more involved. - return m.marshalAny(out, v, indent) - case "Duration": - s, ns := s.Field(0).Int(), s.Field(1).Int() - if s < -maxSecondsInDuration || s > maxSecondsInDuration { - return fmt.Errorf("seconds out of range %v", s) - } - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - // Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision, followed by the suffix "s". - f := "%d.%09d" - if ns < 0 { - ns = -ns - if s == 0 { - f = "-%d.%09d" - } - } - x := fmt.Sprintf(f, s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`s"`) - return out.err - case "Struct", "ListValue": - // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." 
- s, ns := s.Field(0).Int(), s.Field(1).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`Z"`) - return out.err - case "Value": - // Value has a single oneof. - kind := s.Field(0) - if kind.IsNil() { - // "absence of any variant indicates an error" - return errors.New("nil Value") - } - // oneof -> *T -> T -> T.F - x := kind.Elem().Elem().Field(0) - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, x, indent) - } - } - - out.write("{") - if m.Indent != "" { - out.write("\n") - } - - firstField := true - - if typeURL != "" { - if err := m.marshalTypeURL(out, indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < s.NumField(); i++ { - value := s.Field(i) - valueField := s.Type().Field(i) - if strings.HasPrefix(valueField.Name, "XXX_") { - continue - } - - // IsNil will panic on most value kinds. - switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface: - if value.IsNil() { - continue - } - } - - if !m.EmitDefaults { - switch value.Kind() { - case reflect.Bool: - if !value.Bool() { - continue - } - case reflect.Int32, reflect.Int64: - if value.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if value.Uint() == 0 { - continue - } - case reflect.Float32, reflect.Float64: - if value.Float() == 0 { - continue - } - case reflect.String: - if value.Len() == 0 { - continue - } - case reflect.Map, reflect.Ptr, reflect.Slice: - if value.IsNil() { - continue - } - } - } - - // Oneof fields need special handling. - if valueField.Tag.Get("protobuf_oneof") != "" { - // value is an interface containing &T{real_value}. - sv := value.Elem().Elem() // interface -> *T -> T - value = sv.Field(0) - valueField = sv.Type().Field(0) - } - prop := jsonProperties(valueField, m.OrigName) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, prop, value, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if ep, ok := v.(proto.Message); ok { - extensions := proto.RegisteredExtensions(v) - // Sort extensions for stable output. 
- ids := make([]int32, 0, len(extensions)) - for id, desc := range extensions { - if !proto.HasExtension(ep, desc) { - continue - } - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - for _, id := range ids { - desc := extensions[id] - if desc == nil { - // unknown extension - continue - } - ext, extErr := proto.GetExtension(ep, desc) - if extErr != nil { - return extErr - } - value := reflect.ValueOf(ext) - var prop proto.Properties - prop.Parse(desc.Tag) - prop.JSONName = fmt.Sprintf("[%s]", desc.Name) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, &prop, value, indent); err != nil { - return err - } - firstField = false - } - - } - - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err -} - -func (m *Marshaler) writeSep(out *errWriter) { - if m.Indent != "" { - out.write(",\n") - } else { - out.write(",") - } -} - -func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." - v := reflect.ValueOf(any).Elem() - turl := v.Field(0).String() - val := v.Field(1).Bytes() - - var msg proto.Message - var err error - if m.AnyResolver != nil { - msg, err = m.AnyResolver.Resolve(turl) - } else { - msg, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if err := proto.Unmarshal(val, msg); err != nil { - return err - } - - if _, ok := msg.(wkt); ok { - out.write("{") - if m.Indent != "" { - out.write("\n") - } - if err := m.marshalTypeURL(out, indent, turl); err != nil { - return err - } - m.writeSep(out) - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - out.write(`"value": `) - } else { - out.write(`"value":`) - } - if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { - return err - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err - } - - return m.marshalObject(out, msg, indent, turl) -} - -func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"@type":`) - if m.Indent != "" { - out.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - out.write(string(b)) - return out.err -} - -// marshalField writes field description and value to the Writer. -func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"`) - out.write(prop.JSONName) - out.write(`":`) - if m.Indent != "" { - out.write(" ") - } - if err := m.marshalValue(out, prop, v, indent); err != nil { - return err - } - return nil -} - -// marshalValue writes the value to the Writer. -func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - var err error - v = reflect.Indirect(v) - - // Handle nil pointer - if v.Kind() == reflect.Invalid { - out.write("null") - return out.err - } - - // Handle repeated elements. 
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - out.write("[") - comma := "" - for i := 0; i < v.Len(); i++ { - sliceVal := v.Index(i) - out.write(comma) - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { - return err - } - comma = "," - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write("]") - return out.err - } - - // Handle well-known types. - // Most are handled up in marshalObject (because 99% are messages). - if wkt, ok := v.Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "NullValue": - out.write("null") - return out.err - } - } - - // Handle enumerations. - if !m.EnumsAsInts && prop.Enum != "" { - // Unknown enum values will are stringified by the proto library as their - // value. Such values should _not_ be quoted or they will be interpreted - // as an enum string instead of their value. - enumStr := v.Interface().(fmt.Stringer).String() - var valStr string - if v.Kind() == reflect.Ptr { - valStr = strconv.Itoa(int(v.Elem().Int())) - } else { - valStr = strconv.Itoa(int(v.Int())) - } - isKnownEnum := enumStr != valStr - if isKnownEnum { - out.write(`"`) - } - out.write(enumStr) - if isKnownEnum { - out.write(`"`) - } - return out.err - } - - // Handle nested messages. - if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") - } - - // Handle maps. - // Since Go randomizes map iteration, we sort keys for stable output. - if v.Kind() == reflect.Map { - out.write(`{`) - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for i, k := range keys { - if i > 0 { - out.write(`,`) - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - - // TODO handle map key prop properly - b, err := json.Marshal(k.Interface()) - if err != nil { - return err - } - s := string(b) - - // If the JSON is not a string value, encode it again to make it one. - if !strings.HasPrefix(s, `"`) { - b, err := json.Marshal(s) - if err != nil { - return err - } - s = string(b) - } - - out.write(s) - out.write(`:`) - if m.Indent != "" { - out.write(` `) - } - - vprop := prop - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { - return err - } - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write(`}`) - return out.err - } - - // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - f := v.Float() - var sval string - switch { - case math.IsInf(f, 1): - sval = `"Infinity"` - case math.IsInf(f, -1): - sval = `"-Infinity"` - case math.IsNaN(f): - sval = `"NaN"` - } - if sval != "" { - out.write(sval) - return out.err - } - } - - // Default handling defers to the encoding/json library. - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) - if needToQuote { - out.write(`"`) - } - out.write(string(b)) - if needToQuote { - out.write(`"`) - } - return out.err -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. 
-type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. - AllowUnknownFields bool - - // A custom URL resolver to use when unmarshaling Any messages from JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := dec.Decode(&inputValue); err != nil { - return err - } - if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { - return err - } - return checkRequiredFields(pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) -} - -// UnmarshalString will populate the fields of a protocol buffer based -// on a JSON string. This function is lenient and will decode any options -// permutations of the related Marshaler. -func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) -} - -// unmarshalValue converts/copies a value into the target. -// prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { - targetType := target.Type() - - // Allocate memory for pointer fields. - if targetType.Kind() == reflect.Ptr { - // If input value is "null" and target is a pointer type, then the field should be treated as not set - // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. - _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) - if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { - return nil - } - target.Set(reflect.New(targetType.Elem())) - - return u.unmarshalValue(target.Elem(), inputValue, prop) - } - - if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, []byte(inputValue)) - } - - // Handle well-known types that are not pointers. - if w, ok := target.Addr().Interface().(wkt); ok { - switch w.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - return u.unmarshalValue(target.Field(0), inputValue, prop) - case "Any": - // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see - // https://github.com/golang/go/issues/14493 - var jsonFields map[string]*json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - val, ok := jsonFields["@type"] - if !ok || val == nil { - return errors.New("Any JSON doesn't have '@type'") - } - - var turl string - if err := json.Unmarshal([]byte(*val), &turl); err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) - } - target.Field(0).SetString(turl) - - var m proto.Message - var err error - if u.AnyResolver != nil { - m, err = u.AnyResolver.Resolve(turl) - } else { - m, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if _, ok := m.(wkt); ok { - val, ok := jsonFields["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - - if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } else { - delete(jsonFields, "@type") - nestedProto, err := json.Marshal(jsonFields) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - - if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } - - b, err := proto.Marshal(m) - if err != nil { - return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) - } - target.Field(1).SetBytes(b) - - return nil - case "Duration": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - - d, err := time.ParseDuration(unq) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - target.Field(0).SetInt(s) - target.Field(1).SetInt(ns) - return nil - case "Timestamp": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - - t, err := time.Parse(time.RFC3339Nano, unq) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - target.Field(0).SetInt(t.Unix()) - target.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Struct": - var m map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &m); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) - for k, jv := range m { - pv := &stpb.Value{} - if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) - } - target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) - } - return nil - case "ListValue": - var s []json.RawMessage - if err := json.Unmarshal(inputValue, &s); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) - for i, sv := range s { - if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { - return err - } - } - return nil - case "Value": - ivStr := string(inputValue) - if ivStr == "null" { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) - } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) - } else if v, err := unquote(ivStr); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) - } else if v, err := strconv.ParseBool(ivStr); err == nil { - 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) - } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { - lv := &stpb.ListValue{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) - return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) - } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { - sv := &stpb.Struct{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) - return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) - } else { - return fmt.Errorf("unrecognized type for Value %q", ivStr) - } - return nil - } - } - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. - // The case of an enum appearing as a number is handled - // at the bottom of this function. - if inputValue[0] == '"' && prop != nil && prop.Enum != "" { - vmap := proto.EnumValueMap(prop.Enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. - s := inputValue[1 : len(inputValue)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) - } - if target.Kind() == reflect.Ptr { // proto2 - target.Set(reflect.New(targetType.Elem())) - target = target.Elem() - } - if targetType.Kind() != reflect.Int32 { - return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) - } - target.SetInt(int64(n)) - return nil - } - - // Handle nested messages. - if targetType.Kind() == reflect.Struct { - var jsonFields map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { - // Be liberal in what names we accept; both orig_name and camelName are okay. - fieldNames := acceptedJSONFieldNames(prop) - - vOrig, okOrig := jsonFields[fieldNames.orig] - vCamel, okCamel := jsonFields[fieldNames.camel] - if !okOrig && !okCamel { - return nil, false - } - // If, for some reason, both are present in the data, favour the camelName. - var raw json.RawMessage - if okOrig { - raw = vOrig - delete(jsonFields, fieldNames.orig) - } - if okCamel { - raw = vCamel - delete(jsonFields, fieldNames.camel) - } - return raw, true - } - - sprops := proto.GetProperties(targetType) - for i := 0; i < target.NumField(); i++ { - ft := target.Type().Field(i) - if strings.HasPrefix(ft.Name, "XXX_") { - continue - } - - valueForField, ok := consumeField(sprops.Prop[i]) - if !ok { - continue - } - - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { - return err - } - } - // Check for any oneof fields. - if len(jsonFields) > 0 { - for _, oop := range sprops.OneofTypes { - raw, ok := consumeField(oop.Prop) - if !ok { - continue - } - nv := reflect.New(oop.Type.Elem()) - target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { - return err - } - } - } - // Handle proto2 extensions. 
- if len(jsonFields) > 0 { - if ep, ok := target.Addr().Interface().(proto.Message); ok { - for _, ext := range proto.RegisteredExtensions(ep) { - name := fmt.Sprintf("[%s]", ext.Name) - raw, ok := jsonFields[name] - if !ok { - continue - } - delete(jsonFields, name) - nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) - if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { - return err - } - if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { - return err - } - } - } - } - if !u.AllowUnknownFields && len(jsonFields) > 0 { - // Pick any field to be the scapegoat. - var f string - for fname := range jsonFields { - f = fname - break - } - return fmt.Errorf("unknown field %q in %v", f, targetType) - } - return nil - } - - // Handle arrays (which aren't encoded bytes) - if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { - var slc []json.RawMessage - if err := json.Unmarshal(inputValue, &slc); err != nil { - return err - } - if slc != nil { - l := len(slc) - target.Set(reflect.MakeSlice(targetType, l, l)) - for i := 0; i < l; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err - } - } - } - return nil - } - - // Handle maps (whose keys are always strings) - if targetType.Kind() == reflect.Map { - var mp map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &mp); err != nil { - return err - } - if mp != nil { - target.Set(reflect.MakeMap(targetType)) - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - var kprop *proto.Properties - if prop != nil && prop.MapKeyProp != nil { - kprop = prop.MapKeyProp - } - if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { - return err - } - } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - var vprop *proto.Properties - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := u.unmarshalValue(v, raw, vprop); err != nil { - return err - } - target.SetMapIndex(k, v) - } - } - return nil - } - - // Non-finite numbers can be encoded as strings. - isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isFloat { - if num, ok := nonFinite[string(inputValue)]; ok { - target.SetFloat(num) - return nil - } - } - - // integers & floats can be encoded as strings. In this case we drop - // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || - targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || - targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isNum && strings.HasPrefix(string(inputValue), `"`) { - inputValue = inputValue[1 : len(inputValue)-1] - } - - // Use the encoding/json for parsing other value types. - return json.Unmarshal(inputValue, target.Addr().Interface()) -} - -func unquote(s string) (string, error) { - var ret string - err := json.Unmarshal([]byte(s), &ret) - return ret, err -} - -// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
-func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { - var prop proto.Properties - prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) - if origName || prop.JSONName == "" { - prop.JSONName = prop.OrigName - } - return &prop -} - -type fieldNames struct { - orig, camel string -} - -func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { - opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} - if prop.JSONName != "" { - opts.camel = prop.JSONName - } - return opts -} - -// Writer wrapper inspired by https://blog.golang.org/errors-are-values -type errWriter struct { - writer io.Writer - err error -} - -func (w *errWriter) write(str string) { - if w.err != nil { - return - } - _, w.err = w.writer.Write([]byte(str)) -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. -type mapKeys []reflect.Value - -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.String: - return s[i].String() < s[j].String() - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) -} - -// checkRequiredFields returns an error if any required field in the given proto message is not set. -// This function is used by both Marshal and Unmarshal. While required fields only exist in a -// proto2 message, a proto3 message can contain proto2 message(s). -func checkRequiredFields(pb proto.Message) error { - // Most well-known type messages do not contain required fields. The "Any" type may contain - // a message that has required fields. - // - // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value - // field in order to transform that into JSON, and that should have returned an error if a - // required field is not set in the embedded message. - // - // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the - // embedded message to store the serialized message in Any.Value field, and that should have - // returned an error if a required field is not set. - if _, ok := pb.(wkt); ok { - return nil - } - - v := reflect.ValueOf(pb) - // Skip message if it is not a struct pointer. - if v.Kind() != reflect.Ptr { - return nil - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return nil - } - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - sfield := v.Type().Field(i) - - if sfield.PkgPath != "" { - // blank PkgPath means the field is exported; skip if not exported - continue - } - - if strings.HasPrefix(sfield.Name, "XXX_") { - continue - } - - // Oneof field is an interface implemented by wrapper structs containing the actual oneof - // field, i.e. an interface containing &T{real_value}. 
- if sfield.Tag.Get("protobuf_oneof") != "" { - if field.Kind() != reflect.Interface { - continue - } - v := field.Elem() - if v.Kind() != reflect.Ptr || v.IsNil() { - continue - } - v = v.Elem() - if v.Kind() != reflect.Struct || v.NumField() < 1 { - continue - } - field = v.Field(0) - sfield = v.Type().Field(0) - } - - protoTag := sfield.Tag.Get("protobuf") - if protoTag == "" { - continue - } - var prop proto.Properties - prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) - - switch field.Kind() { - case reflect.Map: - if field.IsNil() { - continue - } - // Check each map value. - keys := field.MapKeys() - for _, k := range keys { - v := field.MapIndex(k) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Slice: - // Handle non-repeated type, e.g. bytes. - if !prop.Repeated { - if prop.Required && field.IsNil() { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - - // Handle repeated type. - if field.IsNil() { - continue - } - // Check each slice item. - for i := 0; i < field.Len(); i++ { - v := field.Index(i) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Ptr: - if field.IsNil() { - if prop.Required { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - if err := checkRequiredFieldsInValue(field); err != nil { - return err - } - } - } - - // Handle proto2 extensions. - for _, ext := range proto.RegisteredExtensions(pb) { - if !proto.HasExtension(pb, ext) { - continue - } - ep, err := proto.GetExtension(pb, ext) - if err != nil { - return err - } - err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) - if err != nil { - return err - } - } - - return nil -} - -func checkRequiredFieldsInValue(v reflect.Value) error { - if pm, ok := v.Interface().(proto.Message); ok { - return checkRequiredFields(pm) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go deleted file mode 100644 index 6f4a902b5b..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ /dev/null @@ -1,2806 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - The code generator for the plugin for the Google protocol buffer compiler. - It generates Go code from the protocol buffer description files read by the - main routine. -*/ -package generator - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/printer" - "go/token" - "log" - "os" - "path" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -// generatedCodeVersion indicates a version of the generated code. -// It is incremented whenever an incompatibility between the generated code and -// proto package is introduced; the generated code references -// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 3 - -// A Plugin provides functionality to add to the output during Go code generation, -// such as to produce RPC stubs. -type Plugin interface { - // Name identifies the plugin. - Name() string - // Init is called once after data structures are built but before - // code generation begins. - Init(g *Generator) - // Generate produces the code generated by the plugin for this file, - // except for the imports, by calling the generator's methods P, In, and Out. - Generate(file *FileDescriptor) - // GenerateImports produces the import declarations for this file. - // It is called after Generate. - GenerateImports(file *FileDescriptor) -} - -var plugins []Plugin - -// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. -// It is typically called during initialization. -func RegisterPlugin(p Plugin) { - plugins = append(plugins, p) -} - -// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". -type GoImportPath string - -func (p GoImportPath) String() string { return strconv.Quote(string(p)) } - -// A GoPackageName is the name of a Go package. e.g., "protobuf". -type GoPackageName string - -// Each type we import as a protocol buffer (other than FileDescriptorProto) needs -// a pointer to the FileDescriptorProto that represents it. These types achieve that -// wrapping by placing each Proto inside a struct with the pointer to its File. The -// structs have the same names as their contents, with "Proto" removed. -// FileDescriptor is used to store the things that it points to. - -// The file and package name method are common to messages and enums. -type common struct { - file *FileDescriptor // File this object comes from. -} - -// GoImportPath is the import path of the Go package containing the type. 
-func (c *common) GoImportPath() GoImportPath { - return c.file.importPath -} - -func (c *common) File() *FileDescriptor { return c.file } - -func fileIsProto3(file *descriptor.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } - -// Descriptor represents a protocol buffer message. -type Descriptor struct { - common - *descriptor.DescriptorProto - parent *Descriptor // The containing message, if any. - nested []*Descriptor // Inner messages, if any. - enums []*EnumDescriptor // Inner enums, if any. - ext []*ExtensionDescriptor // Extensions, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or another message. - path string // The SourceCodeInfo path as comma-separated integers. - group bool -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (d *Descriptor) TypeName() []string { - if d.typename != nil { - return d.typename - } - n := 0 - for parent := d; parent != nil; parent = parent.parent { - n++ - } - s := make([]string, n) - for parent := d; parent != nil; parent = parent.parent { - n-- - s[n] = parent.GetName() - } - d.typename = s - return s -} - -// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type EnumDescriptor struct { - common - *descriptor.EnumDescriptorProto - parent *Descriptor // The containing message, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or a message. - path string // The SourceCodeInfo path as comma-separated integers. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (e *EnumDescriptor) TypeName() (s []string) { - if e.typename != nil { - return e.typename - } - name := e.GetName() - if e.parent == nil { - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - e.typename = s - return s -} - -// Everything but the last element of the full type name, CamelCased. -// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... . -func (e *EnumDescriptor) prefix() string { - if e.parent == nil { - // If the enum is not part of a message, the prefix is just the type name. - return CamelCase(*e.Name) + "_" - } - typeName := e.TypeName() - return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" -} - -// The integer value of the named constant in this enumerated type. -func (e *EnumDescriptor) integerValueAsString(name string) string { - for _, c := range e.Value { - if c.GetName() == name { - return fmt.Sprint(c.GetNumber()) - } - } - log.Fatal("cannot find value for enum constant") - return "" -} - -// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type ExtensionDescriptor struct { - common - *descriptor.FieldDescriptorProto - parent *Descriptor // The containing message, if any. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. 
-func (e *ExtensionDescriptor) TypeName() (s []string) { - name := e.GetName() - if e.parent == nil { - // top-level extension - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - return s -} - -// DescName returns the variable name used for the generated descriptor. -func (e *ExtensionDescriptor) DescName() string { - // The full type name. - typeName := e.TypeName() - // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. - for i, s := range typeName { - typeName[i] = CamelCase(s) - } - return "E_" + strings.Join(typeName, "_") -} - -// ImportedDescriptor describes a type that has been publicly imported from another file. -type ImportedDescriptor struct { - common - o Object -} - -func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } - -// FileDescriptor describes an protocol buffer descriptor file (.proto). -// It includes slices of all the messages and enums defined within it. -// Those slices are constructed by WrapTypes. -type FileDescriptor struct { - *descriptor.FileDescriptorProto - desc []*Descriptor // All the messages defined in this file. - enum []*EnumDescriptor // All the enums defined in this file. - ext []*ExtensionDescriptor // All the top-level extensions defined in this file. - imp []*ImportedDescriptor // All types defined in files publicly imported by this file. - - // Comments, stored as a map of path (comma-separated integers) to the comment. - comments map[string]*descriptor.SourceCodeInfo_Location - - // The full list of symbols that are exported, - // as a map from the exported object to its symbols. - // This is used for supporting public imports. - exported map[Object][]symbol - - importPath GoImportPath // Import path of this file's package. - packageName GoPackageName // Name of this file's Go package. - - proto3 bool // whether to generate proto3 code for this file -} - -// VarName is the variable name we'll use in the generated code to refer -// to the compressed bytes of this descriptor. It is not exported, so -// it is only valid inside the generated package. -func (d *FileDescriptor) VarName() string { - h := sha256.Sum256([]byte(d.GetName())) - return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8])) -} - -// goPackageOption interprets the file's go_package option. -// If there is no go_package, it returns ("", "", false). -// If there's a simple name, it returns ("", pkg, true). -// If the option implies an import path, it returns (impPath, pkg, true). -func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { - opt := d.GetOptions().GetGoPackage() - if opt == "" { - return "", "", false - } - // A semicolon-delimited suffix delimits the import path and package name. - sc := strings.Index(opt, ";") - if sc >= 0 { - return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true - } - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(opt, "/") - if slash >= 0 { - return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true - } - return "", cleanPackageName(opt), true -} - -// goFileName returns the output name for the generated Go file. 
-func (d *FileDescriptor) goFileName(pathType pathType) string { - name := *d.Name - if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { - name = name[:len(name)-len(ext)] - } - name += ".pb.go" - - if pathType == pathTypeSourceRelative { - return name - } - - // Does the file have a "go_package" option? - // If it does, it may override the filename. - if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { - // Replace the existing dirname with the declared import path. - _, name = path.Split(name) - name = path.Join(string(impPath), name) - return name - } - - return name -} - -func (d *FileDescriptor) addExport(obj Object, sym symbol) { - d.exported[obj] = append(d.exported[obj], sym) -} - -// symbol is an interface representing an exported Go symbol. -type symbol interface { - // GenerateAlias should generate an appropriate alias - // for the symbol from the named package. - GenerateAlias(g *Generator, filename string, pkg GoPackageName) -} - -type messageSymbol struct { - sym string - hasExtensions, isMessageSet bool - oneofTypes []string -} - -type getterSymbol struct { - name string - typ string - typeName string // canonical name in proto world; empty for proto.Message and similar - genType bool // whether typ contains a generated type (message/group/enum) -} - -func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - g.P("// ", ms.sym, " from public import ", filename) - g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) - for _, name := range ms.oneofTypes { - g.P("type ", name, " = ", pkg, ".", name) - } -} - -type enumSymbol struct { - name string - proto3 bool // Whether this came from a proto3 file. -} - -func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - s := es.name - g.P("// ", s, " from public import ", filename) - g.P("type ", s, " = ", pkg, ".", s) - g.P("var ", s, "_name = ", pkg, ".", s, "_name") - g.P("var ", s, "_value = ", pkg, ".", s, "_value") -} - -type constOrVarSymbol struct { - sym string - typ string // either "const" or "var" - cast string // if non-empty, a type cast is required (used for enums) -} - -func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - v := string(pkg) + "." + cs.sym - if cs.cast != "" { - v = cs.cast + "(" + v + ")" - } - g.P(cs.typ, " ", cs.sym, " = ", v) -} - -// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. -type Object interface { - GoImportPath() GoImportPath - TypeName() []string - File() *FileDescriptor -} - -// Generator is the type whose methods generate the output, stored in the associated response structure. -type Generator struct { - *bytes.Buffer - - Request *plugin.CodeGeneratorRequest // The input. - Response *plugin.CodeGeneratorResponse // The output. - - Param map[string]string // Command-line parameters. - PackageImportPath string // Go import path of the package we're generating code for - ImportPrefix string // String to prefix to imported package file names. - ImportMap map[string]string // Mapping from .proto file name to import path - - Pkg map[string]string // The names under which we import support packages - - outputImportPath GoImportPath // Package we're generating code for. - allFiles []*FileDescriptor // All files in the tree - allFilesByName map[string]*FileDescriptor // All files by filename. - genFiles []*FileDescriptor // Those files we will generate output for. 
- file *FileDescriptor // The file we are compiling now. - packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. - usedPackages map[GoImportPath]bool // Packages used in current file. - usedPackageNames map[GoPackageName]bool // Package names used in the current file. - addedImports map[GoImportPath]bool // Additional imports to emit. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. - indent string - pathType pathType // How to generate output filenames. - writeOutput bool - annotateCode bool // whether to store annotations - annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store -} - -type pathType int - -const ( - pathTypeImport pathType = iota - pathTypeSourceRelative -) - -// New creates a new generator and allocates the request and response protobufs. -func New() *Generator { - g := new(Generator) - g.Buffer = new(bytes.Buffer) - g.Request = new(plugin.CodeGeneratorRequest) - g.Response = new(plugin.CodeGeneratorResponse) - return g -} - -// Error reports a problem, including an error, and exits the program. -func (g *Generator) Error(err error, msgs ...string) { - s := strings.Join(msgs, " ") + ":" + err.Error() - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// Fail reports a problem and exits the program. -func (g *Generator) Fail(msgs ...string) { - s := strings.Join(msgs, " ") - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// CommandLineParameters breaks the comma-separated list of key=value pairs -// in the parameter (a member of the request protobuf) into a key/value map. -// It then sets file name mappings defined by those entries. -func (g *Generator) CommandLineParameters(parameter string) { - g.Param = make(map[string]string) - for _, p := range strings.Split(parameter, ",") { - if i := strings.Index(p, "="); i < 0 { - g.Param[p] = "" - } else { - g.Param[p[0:i]] = p[i+1:] - } - } - - g.ImportMap = make(map[string]string) - pluginList := "none" // Default list of plugin names to enable (empty means all). - for k, v := range g.Param { - switch k { - case "import_prefix": - g.ImportPrefix = v - case "import_path": - g.PackageImportPath = v - case "paths": - switch v { - case "import": - g.pathType = pathTypeImport - case "source_relative": - g.pathType = pathTypeSourceRelative - default: - g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) - } - case "plugins": - pluginList = v - case "annotate_code": - if v == "true" { - g.annotateCode = true - } - default: - if len(k) > 0 && k[0] == 'M' { - g.ImportMap[k[1:]] = v - } - } - } - if pluginList != "" { - // Amend the set of plugins. - enabled := make(map[string]bool) - for _, name := range strings.Split(pluginList, "+") { - enabled[name] = true - } - var nplugins []Plugin - for _, p := range plugins { - if enabled[p.Name()] { - nplugins = append(nplugins, p) - } - } - plugins = nplugins - } -} - -// DefaultPackageName returns the package name printed for the object. -// If its file is in a different package, it returns the package name we're using for this file, plus ".". -// Otherwise it returns the empty string. -func (g *Generator) DefaultPackageName(obj Object) string { - importPath := obj.GoImportPath() - if importPath == g.outputImportPath { - return "" - } - return string(g.GoPackageName(importPath)) + "." -} - -// GoPackageName returns the name used for a package. 
-func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { - if name, ok := g.packageNames[importPath]; ok { - return name - } - name := cleanPackageName(baseName(string(importPath))) - for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - g.packageNames[importPath] = name - g.usedPackageNames[name] = true - return name -} - -// AddImport adds a package to the generated file's import section. -// It returns the name used for the package. -func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { - g.addedImports[importPath] = true - return g.GoPackageName(importPath) -} - -var globalPackageNames = map[GoPackageName]bool{ - "fmt": true, - "math": true, - "proto": true, -} - -// Create and remember a guaranteed unique package name. Pkg is the candidate name. -// The FileDescriptor parameter is unused. -func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { - name := cleanPackageName(pkg) - for i, orig := 1, name; globalPackageNames[name]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - globalPackageNames[name] = true - return string(name) -} - -var isGoKeyword = map[string]bool{ - "break": true, - "case": true, - "chan": true, - "const": true, - "continue": true, - "default": true, - "else": true, - "defer": true, - "fallthrough": true, - "for": true, - "func": true, - "go": true, - "goto": true, - "if": true, - "import": true, - "interface": true, - "map": true, - "package": true, - "range": true, - "return": true, - "select": true, - "struct": true, - "switch": true, - "type": true, - "var": true, -} - -var isGoPredeclaredIdentifier = map[string]bool{ - "append": true, - "bool": true, - "byte": true, - "cap": true, - "close": true, - "complex": true, - "complex128": true, - "complex64": true, - "copy": true, - "delete": true, - "error": true, - "false": true, - "float32": true, - "float64": true, - "imag": true, - "int": true, - "int16": true, - "int32": true, - "int64": true, - "int8": true, - "iota": true, - "len": true, - "make": true, - "new": true, - "nil": true, - "panic": true, - "print": true, - "println": true, - "real": true, - "recover": true, - "rune": true, - "string": true, - "true": true, - "uint": true, - "uint16": true, - "uint32": true, - "uint64": true, - "uint8": true, - "uintptr": true, -} - -func cleanPackageName(name string) GoPackageName { - name = strings.Map(badToUnderscore, name) - // Identifier must not be keyword or predeclared identifier: insert _. - if isGoKeyword[name] { - name = "_" + name - } - // Identifier must not begin with digit: insert _. - if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { - name = "_" + name - } - return GoPackageName(name) -} - -// defaultGoPackage returns the package name to use, -// derived from the import path of the package we're building code for. -func (g *Generator) defaultGoPackage() GoPackageName { - p := g.PackageImportPath - if i := strings.LastIndex(p, "/"); i >= 0 { - p = p[i+1:] - } - return cleanPackageName(p) -} - -// SetPackageNames sets the package name for this run. -// The package name must agree across all files being generated. -// It also defines unique package names for all imported files. 
-func (g *Generator) SetPackageNames() { - g.outputImportPath = g.genFiles[0].importPath - - defaultPackageNames := make(map[GoImportPath]GoPackageName) - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - defaultPackageNames[f.importPath] = p - } - } - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - // Source file: option go_package = "quux/bar"; - f.packageName = p - } else if p, ok := defaultPackageNames[f.importPath]; ok { - // A go_package option in another file in the same package. - // - // This is a poor choice in general, since every source file should - // contain a go_package option. Supported mainly for historical - // compatibility. - f.packageName = p - } else if p := g.defaultGoPackage(); p != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets a package name for files which don't - // contain a go_package option. - f.packageName = p - } else if p := f.GetPackage(); p != "" { - // Source file: package quux.bar; - f.packageName = cleanPackageName(p) - } else { - // Source filename. - f.packageName = cleanPackageName(baseName(f.GetName())) - } - } - - // Check that all files have a consistent package name and import path. - for _, f := range g.genFiles[1:] { - if a, b := g.genFiles[0].importPath, f.importPath; a != b { - g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) - } - if a, b := g.genFiles[0].packageName, f.packageName; a != b { - g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) - } - } - - // Names of support packages. These never vary (if there are conflicts, - // we rename the conflicting package), so this could be removed someday. - g.Pkg = map[string]string{ - "fmt": "fmt", - "math": "math", - "proto": "proto", - } -} - -// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos -// and FileDescriptorProtos into file-referenced objects within the Generator. -// It also creates the list of files to generate and so should be called before GenerateAllFiles. -func (g *Generator) WrapTypes() { - g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) - g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) - genFileNames := make(map[string]bool) - for _, n := range g.Request.FileToGenerate { - genFileNames[n] = true - } - for _, f := range g.Request.ProtoFile { - fd := &FileDescriptor{ - FileDescriptorProto: f, - exported: make(map[Object][]symbol), - proto3: fileIsProto3(f), - } - // The import path may be set in a number of ways. - if substitution, ok := g.ImportMap[f.GetName()]; ok { - // Command-line: M=foo.proto=quux/bar. - // - // Explicit mapping of source file to import path. - fd.importPath = GoImportPath(substitution) - } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets the import path for every file that - // we generate code for. - fd.importPath = GoImportPath(g.PackageImportPath) - } else if p, _, _ := fd.goPackageOption(); p != "" { - // Source file: option go_package = "quux/bar"; - // - // The go_package option sets the import path. Most users should use this. - fd.importPath = p - } else { - // Source filename. - // - // Last resort when nothing else is available. 
- fd.importPath = GoImportPath(path.Dir(f.GetName())) - } - // We must wrap the descriptors before we wrap the enums - fd.desc = wrapDescriptors(fd) - g.buildNestedDescriptors(fd.desc) - fd.enum = wrapEnumDescriptors(fd, fd.desc) - g.buildNestedEnums(fd.desc, fd.enum) - fd.ext = wrapExtensions(fd) - extractComments(fd) - g.allFiles = append(g.allFiles, fd) - g.allFilesByName[f.GetName()] = fd - } - for _, fd := range g.allFiles { - fd.imp = wrapImported(fd, g) - } - - g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) - for _, fileName := range g.Request.FileToGenerate { - fd := g.allFilesByName[fileName] - if fd == nil { - g.Fail("could not find file named", fileName) - } - g.genFiles = append(g.genFiles, fd) - } -} - -// Scan the descriptors in this file. For each one, build the slice of nested descriptors -func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { - for _, desc := range descs { - if len(desc.NestedType) != 0 { - for _, nest := range descs { - if nest.parent == desc { - desc.nested = append(desc.nested, nest) - } - } - if len(desc.nested) != len(desc.NestedType) { - g.Fail("internal error: nesting failure for", desc.GetName()) - } - } - } -} - -func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { - for _, desc := range descs { - if len(desc.EnumType) != 0 { - for _, enum := range enums { - if enum.parent == desc { - desc.enums = append(desc.enums, enum) - } - } - if len(desc.enums) != len(desc.EnumType) { - g.Fail("internal error: enum nesting failure for", desc.GetName()) - } - } - } -} - -// Construct the Descriptor -func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { - d := &Descriptor{ - common: common{file}, - DescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - d.path = fmt.Sprintf("%d,%d", messagePath, index) - } else { - d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) - } - - // The only way to distinguish a group from a message is whether - // the containing message has a TYPE_GROUP field that matches. - if parent != nil { - parts := d.TypeName() - if file.Package != nil { - parts = append([]string{*file.Package}, parts...) - } - exp := "." 
+ strings.Join(parts, ".") - for _, field := range parent.Field { - if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { - d.group = true - break - } - } - } - - for _, field := range desc.Extension { - d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) - } - - return d -} - -// Return a slice of all the Descriptors defined within this file -func wrapDescriptors(file *FileDescriptor) []*Descriptor { - sl := make([]*Descriptor, 0, len(file.MessageType)+10) - for i, desc := range file.MessageType { - sl = wrapThisDescriptor(sl, desc, nil, file, i) - } - return sl -} - -// Wrap this Descriptor, recursively -func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { - sl = append(sl, newDescriptor(desc, parent, file, index)) - me := sl[len(sl)-1] - for i, nested := range desc.NestedType { - sl = wrapThisDescriptor(sl, nested, me, file, i) - } - return sl -} - -// Construct the EnumDescriptor -func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { - ed := &EnumDescriptor{ - common: common{file}, - EnumDescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - ed.path = fmt.Sprintf("%d,%d", enumPath, index) - } else { - ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) - } - return ed -} - -// Return a slice of all the EnumDescriptors defined within this file -func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { - sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) - // Top-level enums. - for i, enum := range file.EnumType { - sl = append(sl, newEnumDescriptor(enum, nil, file, i)) - } - // Enums within messages. Enums within embedded messages appear in the outer-most message. - for _, nested := range descs { - for i, enum := range nested.EnumType { - sl = append(sl, newEnumDescriptor(enum, nested, file, i)) - } - } - return sl -} - -// Return a slice of all the top-level ExtensionDescriptors defined within this file. -func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { - var sl []*ExtensionDescriptor - for _, field := range file.Extension { - sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) - } - return sl -} - -// Return a slice of all the types that are publicly imported into this file. -func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { - for _, index := range file.PublicDependency { - df := g.fileByName(file.Dependency[index]) - for _, d := range df.desc { - if d.GetOptions().GetMapEntry() { - continue - } - sl = append(sl, &ImportedDescriptor{common{file}, d}) - } - for _, e := range df.enum { - sl = append(sl, &ImportedDescriptor{common{file}, e}) - } - for _, ext := range df.ext { - sl = append(sl, &ImportedDescriptor{common{file}, ext}) - } - } - return -} - -func extractComments(file *FileDescriptor) { - file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) - for _, loc := range file.GetSourceCodeInfo().GetLocation() { - if loc.LeadingComments == nil { - continue - } - var p []string - for _, n := range loc.Path { - p = append(p, strconv.Itoa(int(n))) - } - file.comments[strings.Join(p, ",")] = loc - } -} - -// BuildTypeNameMap builds the map from fully qualified type names to objects. -// The key names for the map come from the input data, which puts a period at the beginning. 
-// It should be called after SetPackageNames and before GenerateAllFiles. -func (g *Generator) BuildTypeNameMap() { - g.typeNameToObject = make(map[string]Object) - for _, f := range g.allFiles { - // The names in this loop are defined by the proto world, not us, so the - // package name may be empty. If so, the dotted package name of X will - // be ".X"; otherwise it will be ".pkg.X". - dottedPkg := "." + f.GetPackage() - if dottedPkg != "." { - dottedPkg += "." - } - for _, enum := range f.enum { - name := dottedPkg + dottedSlice(enum.TypeName()) - g.typeNameToObject[name] = enum - } - for _, desc := range f.desc { - name := dottedPkg + dottedSlice(desc.TypeName()) - g.typeNameToObject[name] = desc - } - } -} - -// ObjectNamed, given a fully-qualified input type name as it appears in the input data, -// returns the descriptor for the message or enum with that name. -func (g *Generator) ObjectNamed(typeName string) Object { - o, ok := g.typeNameToObject[typeName] - if !ok { - g.Fail("can't find object with type", typeName) - } - return o -} - -// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. -type AnnotatedAtoms struct { - source string - path string - atoms []interface{} -} - -// Annotate records the file name and proto AST path of a list of atoms -// so that a later call to P can emit a link from each atom to its origin. -func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { - return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} -} - -// printAtom prints the (atomic, non-annotation) argument to the generated output. -func (g *Generator) printAtom(v interface{}) { - switch v := v.(type) { - case string: - g.WriteString(v) - case *string: - g.WriteString(*v) - case bool: - fmt.Fprint(g, v) - case *bool: - fmt.Fprint(g, *v) - case int: - fmt.Fprint(g, v) - case *int32: - fmt.Fprint(g, *v) - case *int64: - fmt.Fprint(g, *v) - case float64: - fmt.Fprint(g, v) - case *float64: - fmt.Fprint(g, *v) - case GoPackageName: - g.WriteString(string(v)) - case GoImportPath: - g.WriteString(strconv.Quote(string(v))) - default: - g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) - } -} - -// P prints the arguments to the generated output. It handles strings and int32s, plus -// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit -// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode -// is true). -func (g *Generator) P(str ...interface{}) { - if !g.writeOutput { - return - } - g.WriteString(g.indent) - for _, v := range str { - switch v := v.(type) { - case *AnnotatedAtoms: - begin := int32(g.Len()) - for _, v := range v.atoms { - g.printAtom(v) - } - if g.annotateCode { - end := int32(g.Len()) - var path []int32 - for _, token := range strings.Split(v.path, ",") { - val, err := strconv.ParseInt(token, 10, 32) - if err != nil { - g.Fail("could not parse proto AST path: ", err.Error()) - } - path = append(path, int32(val)) - } - g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ - Path: path, - SourceFile: &v.source, - Begin: &begin, - End: &end, - }) - } - default: - g.printAtom(v) - } - } - g.WriteByte('\n') -} - -// addInitf stores the given statement to be printed inside the file's init function. -// The statement is given as a format specifier and arguments. 
-func (g *Generator) addInitf(stmt string, a ...interface{}) { - g.init = append(g.init, fmt.Sprintf(stmt, a...)) -} - -// In Indents the output one tab stop. -func (g *Generator) In() { g.indent += "\t" } - -// Out unindents the output one tab stop. -func (g *Generator) Out() { - if len(g.indent) > 0 { - g.indent = g.indent[1:] - } -} - -// GenerateAllFiles generates the output for all the files we're outputting. -func (g *Generator) GenerateAllFiles() { - // Initialize the plugins - for _, p := range plugins { - p.Init(g) - } - // Generate the output. The generator runs for every file, even the files - // that we don't generate output for, so that we can collate the full list - // of exported symbols to support public imports. - genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) - for _, file := range g.genFiles { - genFileMap[file] = true - } - for _, file := range g.allFiles { - g.Reset() - g.annotations = nil - g.writeOutput = genFileMap[file] - g.generate(file) - if !g.writeOutput { - continue - } - fname := file.goFileName(g.pathType) - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(fname), - Content: proto.String(g.String()), - }) - if g.annotateCode { - // Store the generated code annotations in text, as the protoc plugin protocol requires that - // strings contain valid UTF-8. - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(file.goFileName(g.pathType) + ".meta"), - Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), - }) - } - } -} - -// Run all the plugins associated with the file. -func (g *Generator) runPlugins(file *FileDescriptor) { - for _, p := range plugins { - p.Generate(file) - } -} - -// Fill the response protocol buffer with the generated output for all the files we're -// supposed to generate. -func (g *Generator) generate(file *FileDescriptor) { - g.file = file - g.usedPackages = make(map[GoImportPath]bool) - g.packageNames = make(map[GoImportPath]GoPackageName) - g.usedPackageNames = make(map[GoPackageName]bool) - g.addedImports = make(map[GoImportPath]bool) - for name := range globalPackageNames { - g.usedPackageNames[name] = true - } - - g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the proto package it is being compiled against.") - g.P("// A compilation error at this line likely means your copy of the") - g.P("// proto package needs to be updated.") - g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") - g.P() - - for _, td := range g.file.imp { - g.generateImported(td) - } - for _, enum := range g.file.enum { - g.generateEnum(enum) - } - for _, desc := range g.file.desc { - // Don't generate virtual messages for maps. - if desc.GetOptions().GetMapEntry() { - continue - } - g.generateMessage(desc) - } - for _, ext := range g.file.ext { - g.generateExtension(ext) - } - g.generateInitFunction() - g.generateFileDescriptor(file) - - // Run the plugins before the imports so we know which imports are necessary. - g.runPlugins(file) - - // Generate header and imports last, though they appear first in the output. - rem := g.Buffer - remAnno := g.annotations - g.Buffer = new(bytes.Buffer) - g.annotations = nil - g.generateHeader() - g.generateImports() - if !g.writeOutput { - return - } - // Adjust the offsets for annotations displaced by the header and imports. 
- for _, anno := range remAnno { - *anno.Begin += int32(g.Len()) - *anno.End += int32(g.Len()) - g.annotations = append(g.annotations, anno) - } - g.Write(rem.Bytes()) - - // Reformat generated code and patch annotation locations. - fset := token.NewFileSet() - original := g.Bytes() - if g.annotateCode { - // make a copy independent of g; we'll need it after Reset. - original = append([]byte(nil), original...) - } - fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments) - if err != nil { - // Print out the bad code with line numbers. - // This should never happen in practice, but it can while changing generated code, - // so consider this a debugging aid. - var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(original)) - for line := 1; s.Scan(); line++ { - fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) - } - g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) - } - ast.SortImports(fset, fileAST) - g.Reset() - err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST) - if err != nil { - g.Fail("generated Go source code could not be reformatted:", err.Error()) - } - if g.annotateCode { - m, err := remap.Compute(original, g.Bytes()) - if err != nil { - g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) - } - for _, anno := range g.annotations { - new, ok := m.Find(int(*anno.Begin), int(*anno.End)) - if !ok { - g.Fail("span in formatted generated Go source code could not be mapped back to the original code") - } - *anno.Begin = int32(new.Pos) - *anno.End = int32(new.End) - } - } -} - -// Generate the header, including package definition -func (g *Generator) generateHeader() { - g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") - if g.file.GetOptions().GetDeprecated() { - g.P("// ", g.file.Name, " is a deprecated file.") - } else { - g.P("// source: ", g.file.Name) - } - g.P() - g.PrintComments(strconv.Itoa(packagePath)) - g.P() - g.P("package ", g.file.packageName) - g.P() -} - -// deprecationComment is the standard comment added to deprecated -// messages, fields, enums, and enum values. -var deprecationComment = "// Deprecated: Do not use." - -// PrintComments prints any comments from the source .proto file. -// The path is a comma-separated list of integers. -// It returns an indication of whether any comments were printed. -// See descriptor.proto for its format. -func (g *Generator) PrintComments(path string) bool { - if !g.writeOutput { - return false - } - if c, ok := g.makeComments(path); ok { - g.P(c) - return true - } - return false -} - -// makeComments generates the comment string for the field, no "\n" at the end -func (g *Generator) makeComments(path string) (string, bool) { - loc, ok := g.file.comments[path] - if !ok { - return "", false - } - w := new(bytes.Buffer) - nl := "" - for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { - fmt.Fprintf(w, "%s//%s", nl, line) - nl = "\n" - } - return w.String(), true -} - -func (g *Generator) fileByName(filename string) *FileDescriptor { - return g.allFilesByName[filename] -} - -// weak returns whether the ith import of the current file is a weak import. 
-func (g *Generator) weak(i int32) bool { - for _, j := range g.file.WeakDependency { - if j == i { - return true - } - } - return false -} - -// Generate the imports -func (g *Generator) generateImports() { - imports := make(map[GoImportPath]GoPackageName) - for i, s := range g.file.Dependency { - fd := g.fileByName(s) - importPath := fd.importPath - // Do not import our own package. - if importPath == g.file.importPath { - continue - } - // Do not import weak imports. - if g.weak(int32(i)) { - continue - } - // Do not import a package twice. - if _, ok := imports[importPath]; ok { - continue - } - // We need to import all the dependencies, even if we don't reference them, - // because other code and tools depend on having the full transitive closure - // of protocol buffer types in the binary. - packageName := g.GoPackageName(importPath) - if _, ok := g.usedPackages[importPath]; !ok { - packageName = "_" - } - imports[importPath] = packageName - } - for importPath := range g.addedImports { - imports[importPath] = g.GoPackageName(importPath) - } - // We almost always need a proto import. Rather than computing when we - // do, which is tricky when there's a plugin, just import it and - // reference it later. The same argument applies to the fmt and math packages. - g.P("import (") - g.P(g.Pkg["fmt"] + ` "fmt"`) - g.P(g.Pkg["math"] + ` "math"`) - g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") - for importPath, packageName := range imports { - g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath) - } - g.P(")") - g.P() - // TODO: may need to worry about uniqueness across plugins - for _, p := range plugins { - p.GenerateImports(g.file) - g.P() - } - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ = ", g.Pkg["proto"], ".Marshal") - g.P("var _ = ", g.Pkg["fmt"], ".Errorf") - g.P("var _ = ", g.Pkg["math"], ".Inf") - g.P() -} - -func (g *Generator) generateImported(id *ImportedDescriptor) { - df := id.o.File() - filename := *df.Name - if df.importPath == g.file.importPath { - // Don't generate type aliases for files in the same Go package as this one. - return - } - if !supportTypeAliases { - g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) - } - g.usedPackages[df.importPath] = true - - for _, sym := range df.exported[id.o] { - sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath)) - } - - g.P() -} - -// Generate the enum definitions for this EnumDescriptor. -func (g *Generator) generateEnum(enum *EnumDescriptor) { - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. 
- ccTypeName := CamelCaseSlice(typeName) - ccPrefix := enum.prefix() - - deprecatedEnum := "" - if enum.GetOptions().GetDeprecated() { - deprecatedEnum = deprecationComment - } - g.PrintComments(enum.path) - g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) - g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) - g.P("const (") - for i, e := range enum.Value { - etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) - g.PrintComments(etorPath) - - deprecatedValue := "" - if e.GetOptions().GetDeprecated() { - deprecatedValue = deprecationComment - } - - name := ccPrefix + *e.Name - g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) - g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) - } - g.P(")") - g.P() - g.P("var ", ccTypeName, "_name = map[int32]string{") - generated := make(map[int32]bool) // avoid duplicate values - for _, e := range enum.Value { - duplicate := "" - if _, present := generated[*e.Number]; present { - duplicate = "// Duplicate value: " - } - g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") - generated[*e.Number] = true - } - g.P("}") - g.P() - g.P("var ", ccTypeName, "_value = map[string]int32{") - for _, e := range enum.Value { - g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") - } - g.P("}") - g.P() - - if !enum.proto3() { - g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") - g.P("p := new(", ccTypeName, ")") - g.P("*p = x") - g.P("return p") - g.P("}") - g.P() - } - - g.P("func (x ", ccTypeName, ") String() string {") - g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") - g.P("}") - g.P() - - if !enum.proto3() { - g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") - g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) - g.P("if err != nil {") - g.P("return err") - g.P("}") - g.P("*x = ", ccTypeName, "(value)") - g.P("return nil") - g.P("}") - g.P() - } - - var indexes []string - for m := enum.parent; m != nil; m = m.parent { - // XXX: skip groups? - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - indexes = append(indexes, strconv.Itoa(enum.index)) - g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - g.P() - if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { - g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) - g.P() - } - - g.generateEnumRegistration(enum) -} - -// The tag is a string like "varint,2,opt,name=fieldname,def=7" that -// identifies details of the field for the protocol buffer marshaling and unmarshaling -// code. The fields are: -// wire encoding -// protocol tag number -// opt,req,rep for optional, required, or repeated -// packed whether the encoding is "packed" (optional; repeated primitives only) -// name= the original declared name -// enum= the name of the enum type if it is an enum-typed field. -// proto3 if this field is in a proto3 message -// def= string representation of the default value, if any. -// The default value must be in a representation that can be used at run-time -// to generate the default value. Thus bools become 0 and 1, for instance. 
-func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { - optrepreq := "" - switch { - case isOptional(field): - optrepreq = "opt" - case isRequired(field): - optrepreq = "req" - case isRepeated(field): - optrepreq = "rep" - } - var defaultValue string - if dv := field.DefaultValue; dv != nil { // set means an explicit default - defaultValue = *dv - // Some types need tweaking. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - if defaultValue == "true" { - defaultValue = "1" - } else { - defaultValue = "0" - } - case descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: - // Nothing to do. Quoting is done for the whole tag. - case descriptor.FieldDescriptorProto_TYPE_ENUM: - // For enums we need to provide the integer constant. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - // It is an enum that was publicly imported. - // We need the underlying type. - obj = id.o - } - enum, ok := obj.(*EnumDescriptor) - if !ok { - log.Printf("obj is a %T", obj) - if id, ok := obj.(*ImportedDescriptor); ok { - log.Printf("id.o is a %T", id.o) - } - g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) - } - defaultValue = enum.integerValueAsString(defaultValue) - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { - if f, err := strconv.ParseFloat(defaultValue, 32); err == nil { - defaultValue = fmt.Sprint(float32(f)) - } - } - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { - if f, err := strconv.ParseFloat(defaultValue, 64); err == nil { - defaultValue = fmt.Sprint(f) - } - } - } - defaultValue = ",def=" + defaultValue - } - enum := "" - if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { - // We avoid using obj.GoPackageName(), because we want to use the - // original (proto-world) package name. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - obj = id.o - } - enum = ",enum=" - if pkg := obj.File().GetPackage(); pkg != "" { - enum += pkg + "." - } - enum += CamelCaseSlice(obj.TypeName()) - } - packed := "" - if (field.Options != nil && field.Options.GetPacked()) || - // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: - // "In proto3, repeated fields of scalar numeric types use packed encoding by default." - (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && - isRepeated(field) && isScalar(field)) { - packed = ",packed" - } - fieldName := field.GetName() - name := fieldName - if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { - // We must use the type name for groups instead of - // the field name to preserve capitalization. - // type_name in FieldDescriptorProto is fully-qualified, - // but we only want the local part. - name = *field.TypeName - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - } - if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name { - // TODO: escaping might be needed, in which case - // perhaps this should be in its own "json" tag. 
- name += ",json=" + json - } - name = ",name=" + name - if message.proto3() { - name += ",proto3" - } - oneof := "" - if field.OneofIndex != nil { - oneof = ",oneof" - } - return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", - wiretype, - field.GetNumber(), - optrepreq, - packed, - name, - enum, - oneof, - defaultValue)) -} - -func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { - switch typ { - case descriptor.FieldDescriptorProto_TYPE_GROUP: - return false - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - return false - case descriptor.FieldDescriptorProto_TYPE_BYTES: - return false - } - return true -} - -// TypeName is the printed name appropriate for an item. If the object is in the current file, -// TypeName drops the package name and underscores the rest. -// Otherwise the object is from another package; and the result is the underscored -// package name followed by the item name. -// The result always has an initial capital. -func (g *Generator) TypeName(obj Object) string { - return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) -} - -// GoType returns a string representing the type name, and the wire type -func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { - // TODO: Options. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - typ, wire = "float64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - typ, wire = "float32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_INT64: - typ, wire = "int64", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT64: - typ, wire = "uint64", "varint" - case descriptor.FieldDescriptorProto_TYPE_INT32: - typ, wire = "int32", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT32: - typ, wire = "uint32", "varint" - case descriptor.FieldDescriptorProto_TYPE_FIXED64: - typ, wire = "uint64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FIXED32: - typ, wire = "uint32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - typ, wire = "bool", "varint" - case descriptor.FieldDescriptorProto_TYPE_STRING: - typ, wire = "string", "bytes" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "group" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "bytes" - case descriptor.FieldDescriptorProto_TYPE_BYTES: - typ, wire = "[]byte", "bytes" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = g.TypeName(desc), "varint" - case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - typ, wire = "int32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - typ, wire = "int64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - typ, wire = "int32", "zigzag32" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - typ, wire = "int64", "zigzag64" - default: - g.Fail("unknown type for", field.GetName()) - } - if isRepeated(field) { - typ = "[]" + typ - } else if message != nil && message.proto3() { - return - } else if field.OneofIndex != nil && message != nil { - return - } else if needsStar(*field.Type) { - typ = "*" + typ - } - return -} - -func (g *Generator) RecordTypeUse(t string) { - if _, ok := g.typeNameToObject[t]; !ok { - return - } - importPath := g.ObjectNamed(t).GoImportPath() - if importPath == g.outputImportPath { - // Don't record use of objects in our 
package. - return - } - g.AddImport(importPath) - g.usedPackages[importPath] = true -} - -// Method names that may be generated. Fields with these names get an -// underscore appended. Any change to this set is a potential incompatible -// API change because it changes generated field names. -var methodNames = [...]string{ - "Reset", - "String", - "ProtoMessage", - "Marshal", - "Unmarshal", - "ExtensionRangeArray", - "ExtensionMap", - "Descriptor", -} - -// Names of messages in the `google.protobuf` package for which -// we will generate XXX_WellKnownType methods. -var wellKnownTypes = map[string]bool{ - "Any": true, - "Duration": true, - "Empty": true, - "Struct": true, - "Timestamp": true, - - "Value": true, - "ListValue": true, - "DoubleValue": true, - "FloatValue": true, - "Int64Value": true, - "UInt64Value": true, - "Int32Value": true, - "UInt32Value": true, - "BoolValue": true, - "StringValue": true, - "BytesValue": true, -} - -// getterDefault finds the default value for the field to return from a getter, -// regardless of if it's a built in default or explicit from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName" -func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string { - if isRepeated(field) { - return "nil" - } - if def := field.GetDefaultValue(); def != "" { - defaultConstant := g.defaultConstantName(goMessageType, field.GetName()) - if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { - return defaultConstant - } - return "append([]byte(nil), " + defaultConstant + "...)" - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - return "false" - case descriptor.FieldDescriptorProto_TYPE_STRING: - return `""` - case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES: - return "nil" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - obj := g.ObjectNamed(field.GetTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate getter for %s", field.GetName()) - return "nil" - } - if len(enum.Value) == 0 { - return "0 // empty enum" - } - first := enum.Value[0].GetName() - return g.DefaultPackageName(obj) + enum.prefix() + first - default: - return "0" - } -} - -// defaultConstantName builds the name of the default constant from the message -// type name and the untouched field name, e.g. "Default_MessageType_FieldName" -func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string { - return "Default_" + goMessageType + "_" + CamelCase(protoFieldName) -} - -// The different types of fields in a message and how to actually print them -// Most of the logic for generateMessage is in the methods of these types. -// -// Note that the content of the field is irrelevant, a simpleField can contain -// anything from a scalar to a group (which is just a message). -// -// Extension fields (and message sets) are however handled separately. -// -// simpleField - a field that is neiter weak nor oneof, possibly repeated -// oneofField - field containing list of subfields: -// - oneofSubField - a field within the oneof - -// msgCtx contains the context for the generator functions. -type msgCtx struct { - goName string // Go struct name of the message, e.g. 
MessageName - message *Descriptor // The descriptor for the message -} - -// fieldCommon contains data common to all types of fields. -type fieldCommon struct { - goName string // Go name of field, e.g. "FieldName" or "Descriptor_" - protoName string // Name of field in proto language, e.g. "field_name" or "descriptor" - getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_" - goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage" - tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"` - fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0" -} - -// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor". -func (f *fieldCommon) getProtoName() string { - return f.protoName -} - -// getGoType returns the go type of the field as a string, e.g. "*int32". -func (f *fieldCommon) getGoType() string { - return f.goType -} - -// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated. -type simpleField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use." - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - comment string // The full comment for the field, e.g. "// Useful information" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *simpleField) decl(g *Generator, mc *msgCtx) { - g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated) -} - -// getter prints the getter for the field. -func (f *simpleField) getter(g *Generator, mc *msgCtx) { - star := "" - tname := f.goType - if needsStar(f.protoType) && tname[0] == '*' { - tname = tname[1:] - star = "*" - } - if f.deprecated != "" { - g.P(f.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {") - if f.getterDef == "nil" { // Simpler getter - g.P("if m != nil {") - g.P("return m." + f.goName) - g.P("}") - g.P("return nil") - g.P("}") - g.P() - return - } - if mc.message.proto3() { - g.P("if m != nil {") - } else { - g.P("if m != nil && m." + f.goName + " != nil {") - } - g.P("return " + star + "m." + f.goName) - g.P("}") - g.P("return ", f.getterDef) - g.P("}") - g.P() -} - -// setter prints the setter method of the field. -func (f *simpleField) setter(g *Generator, mc *msgCtx) { - // No setter for regular fields yet -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *simpleField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *simpleField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofSubFields are kept slize held by each oneofField. 
They do not appear in the top level slize of fields for the message. -type oneofSubField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName" - fieldNumber int // Actual field number, as defined in proto, e.g. 12 - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - deprecated string // Deprecation comment, if any. -} - -// typedNil prints a nil casted to the pointer to this field. -// - for XXX_OneofWrappers -func (f *oneofSubField) typedNil(g *Generator) { - g.P("(*", f.oneofTypeName, ")(nil),") -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *oneofSubField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *oneofSubField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofField represents the oneof on top level. -// The alternative fields within the oneof are represented by oneofSubField. -type oneofField struct { - fieldCommon - subFields []*oneofSubField // All the possible oneof fields - comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *oneofField) decl(g *Generator, mc *msgCtx) { - comment := f.comment - for _, sf := range f.subFields { - comment += "//\t*" + sf.oneofTypeName + "\n" - } - g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`") -} - -// getter for a oneof field will print additional discriminators and interfaces for the oneof, -// also it prints all the getters for the sub fields. 
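// [Editor's aside -- illustrative only, not part of the diff.] For a
// hypothetical proto3 message
//
//     message Profile { oneof avatar { string image_url = 1; bytes image_data = 2; } }
//
// the oneof getter below emits code shaped roughly like this sketch (all
// Profile names are invented for illustration):
type isProfile_Avatar interface {
	isProfile_Avatar()
}

type Profile struct {
	Avatar isProfile_Avatar `protobuf_oneof:"avatar"`
}

type Profile_ImageUrl struct {
	ImageUrl string `protobuf:"bytes,1,opt,name=image_url,json=imageUrl,proto3,oneof"`
}

type Profile_ImageData struct {
	ImageData []byte `protobuf:"bytes,2,opt,name=image_data,json=imageData,proto3,oneof"`
}

func (*Profile_ImageUrl) isProfile_Avatar()  {}
func (*Profile_ImageData) isProfile_Avatar() {}

// Getter for the oneof itself, then one getter per alternative.
func (m *Profile) GetAvatar() isProfile_Avatar {
	if m != nil {
		return m.Avatar
	}
	return nil
}

func (m *Profile) GetImageUrl() string {
	if x, ok := m.GetAvatar().(*Profile_ImageUrl); ok {
		return x.ImageUrl
	}
	return ""
}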
-func (f *oneofField) getter(g *Generator, mc *msgCtx) { - // The discriminator type - g.P("type ", f.goType, " interface {") - g.P(f.goType, "()") - g.P("}") - g.P() - // The subField types, fulfilling the discriminator type contract - for _, sf := range f.subFields { - g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {") - g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`") - g.P("}") - g.P() - } - for _, sf := range f.subFields { - g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}") - g.P() - } - // Getter for the oneof field - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {") - g.P("if m != nil { return m.", f.goName, " }") - g.P("return nil") - g.P("}") - g.P() - // Getters for each oneof - for _, sf := range f.subFields { - if sf.deprecated != "" { - g.P(sf.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {") - g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {") - g.P("return x.", sf.goName) - g.P("}") - g.P("return ", sf.getterDef) - g.P("}") - g.P() - } -} - -// setter prints the setter method of the field. -func (f *oneofField) setter(g *Generator, mc *msgCtx) { - // No setters for oneof yet -} - -// topLevelField interface implemented by all types of fields on the top level (not oneofSubField). -type topLevelField interface { - decl(g *Generator, mc *msgCtx) // print declaration within the struct - getter(g *Generator, mc *msgCtx) // print getter - setter(g *Generator, mc *msgCtx) // print setter if applicable -} - -// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField). -type defField interface { - getProtoDef() string // default value explicitly stated in the proto file, e.g "yoshi" or "5" - getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor" - getGoType() string // go type of the field as a string, e.g. "*int32" - getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration" - getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 -} - -// generateDefaultConstants adds constants for default values if needed, which is only if the default value is. -// explicit in the proto. -func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) { - // Collect fields that can have defaults - dFields := []defField{} - for _, pf := range topLevelFields { - if f, ok := pf.(*oneofField); ok { - for _, osf := range f.subFields { - dFields = append(dFields, osf) - } - continue - } - dFields = append(dFields, pf.(defField)) - } - for _, df := range dFields { - def := df.getProtoDef() - if def == "" { - continue - } - fieldname := g.defaultConstantName(mc.goName, df.getProtoName()) - typename := df.getGoType() - if typename[0] == '*' { - typename = typename[1:] - } - kind := "const " - switch { - case typename == "bool": - case typename == "string": - def = strconv.Quote(def) - case typename == "[]byte": - def = "[]byte(" + strconv.Quote(unescape(def)) + ")" - kind = "var " - case def == "inf", def == "-inf", def == "nan": - // These names are known to, and defined by, the protocol language. 
- switch def { - case "inf": - def = "math.Inf(1)" - case "-inf": - def = "math.Inf(-1)" - case "nan": - def = "math.NaN()" - } - if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT { - def = "float32(" + def + ")" - } - kind = "var " - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT: - if f, err := strconv.ParseFloat(def, 32); err == nil { - def = fmt.Sprint(float32(f)) - } - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if f, err := strconv.ParseFloat(def, 64); err == nil { - def = fmt.Sprint(f) - } - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM: - // Must be an enum. Need to construct the prefixed name. - obj := g.ObjectNamed(df.getProtoTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate constant for %s", fieldname) - continue - } - def = g.DefaultPackageName(obj) + enum.prefix() + def - } - g.P(kind, fieldname, " ", typename, " = ", def) - g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""}) - } - g.P() -} - -// generateInternalStructFields just adds the XXX_ fields to the message struct. -func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) { - g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals - if len(mc.message.ExtensionRange) > 0 { - messageset := "" - if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { - messageset = "protobuf_messageset:\"1\" " - } - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") - } - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") - g.P("XXX_sizecache\tint32 `json:\"-\"`") - -} - -// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer. -func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { - ofields := []*oneofField{} - for _, f := range topLevelFields { - if o, ok := f.(*oneofField); ok { - ofields = append(ofields, o) - } - } - if len(ofields) == 0 { - return - } - - // OneofFuncs - g.P("// XXX_OneofWrappers is for the internal use of the proto package.") - g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {") - g.P("return []interface{}{") - for _, of := range ofields { - for _, sf := range of.subFields { - sf.typedNil(g) - } - } - g.P("}") - g.P("}") - g.P() -} - -// generateMessageStruct adds the actual struct with it's members (but not methods) to the output. -func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) { - comments := g.PrintComments(mc.message.path) - - // Guarantee deprecation comments appear after user-provided comments. - if mc.message.GetOptions().GetDeprecated() { - if comments { - // Convention: Separate deprecation comments from original - // comments with an empty line. - g.P("//") - } - g.P(deprecationComment) - } - - g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {") - for _, pf := range topLevelFields { - pf.decl(g, mc) - } - g.generateInternalStructFields(mc, topLevelFields) - g.P("}") -} - -// generateGetters adds getters for all fields, including oneofs and weak fields when applicable. 
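// [Editor's aside -- illustrative only, not part of the diff.] Combining
// generateMessageStruct and generateInternalStructFields above, a hypothetical
// proto3 message with a single string field ends up as a struct of this shape
// (the ExampleMsg name is invented for this sketch):
type ExampleMsg struct {
	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}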
-func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.getter(g, mc) - } -} - -// generateSetters add setters for all fields, including oneofs and weak fields when applicable. -func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.setter(g, mc) - } -} - -// generateCommonMethods adds methods to the message that are not on a per field basis. -func (g *Generator) generateCommonMethods(mc *msgCtx) { - // Reset, String and ProtoMessage methods. - g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") - g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") - g.P("func (*", mc.goName, ") ProtoMessage() {}") - var indexes []string - for m := mc.message; m != nil; m = m.parent { - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - g.P() - // TODO: Revisit the decision to use a XXX_WellKnownType method - // if we change proto.MessageName to work with multiple equivalents. - if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { - g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) - g.P() - } - - // Extension support methods - if len(mc.message.ExtensionRange) > 0 { - g.P() - g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") - for _, r := range mc.message.ExtensionRange { - end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{Start: ", r.Start, ", End: ", end, "},") - } - g.P("}") - g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") - g.P("return extRange_", mc.goName) - g.P("}") - g.P() - } - - // TODO: It does not scale to keep adding another method for every - // operation on protos that we want to switch over to using the - // table-driven approach. Instead, we should only add a single method - // that allows getting access to the *InternalMessageInfo struct and then - // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. - - // Wrapper for table-driven marshaling and unmarshaling. - g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") - g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") - g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") - g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message - g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") - g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") - g.P("}") - - g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") - g.P() -} - -// Generate the type, methods and default constant definitions for this Descriptor. 
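// [Editor's aside -- illustrative only, not part of the diff.] For the
// hypothetical ExampleMsg sketched earlier, generateCommonMethods above emits
// per-message boilerplate along these lines (assuming the usual
// github.com/golang/protobuf/proto import):
func (m *ExampleMsg) Reset()         { *m = ExampleMsg{} }
func (m *ExampleMsg) String() string { return proto.CompactTextString(m) }
func (*ExampleMsg) ProtoMessage()    {}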
-func (g *Generator) generateMessage(message *Descriptor) { - topLevelFields := []topLevelField{} - oFields := make(map[int32]*oneofField) - // The full type name - typeName := message.TypeName() - // The full type name, CamelCased. - goTypeName := CamelCaseSlice(typeName) - - usedNames := make(map[string]bool) - for _, n := range methodNames { - usedNames[n] = true - } - - // allocNames finds a conflict-free variation of the given strings, - // consistently mutating their suffixes. - // It returns the same number of strings. - allocNames := func(ns ...string) []string { - Loop: - for { - for _, n := range ns { - if usedNames[n] { - for i := range ns { - ns[i] += "_" - } - continue Loop - } - } - for _, n := range ns { - usedNames[n] = true - } - return ns - } - } - - mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later - - // Build a structure more suitable for generating the text in one pass - for i, field := range message.Field { - // Allocate the getter and the field at the same time so name - // collisions create field/method consistent names. - // TODO: This allocation occurs based on the order of the fields - // in the proto file, meaning that a change in the field - // ordering can change generated Method/Field names. - base := CamelCase(*field.Name) - ns := allocNames(base, "Get"+base) - fieldName, fieldGetterName := ns[0], ns[1] - typename, wiretype := g.GoType(message, field) - jsonName := *field.Name - tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") - - oneof := field.OneofIndex != nil - if oneof && oFields[*field.OneofIndex] == nil { - odp := message.OneofDecl[int(*field.OneofIndex)] - base := CamelCase(odp.GetName()) - fname := allocNames(base)[0] - - // This is the first field of a oneof we haven't seen before. - // Generate the union field. - oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) - c, ok := g.makeComments(oneofFullPath) - if ok { - c += "\n//\n" - } - c += "// Types that are valid to be assigned to " + fname + ":\n" - // Generate the rest of this comment later, - // when we've computed any disambiguation. - - dname := "is" + goTypeName + "_" + fname - tag := `protobuf_oneof:"` + odp.GetName() + `"` - of := oneofField{ - fieldCommon: fieldCommon{ - goName: fname, - getterName: "Get"+fname, - goType: dname, - tags: tag, - protoName: odp.GetName(), - fullPath: oneofFullPath, - }, - comment: c, - } - topLevelFields = append(topLevelFields, &of) - oFields[*field.OneofIndex] = &of - } - - if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { - desc := g.ObjectNamed(field.GetTypeName()) - if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { - // Figure out the Go types and tags for the key and value types. - keyField, valField := d.Field[0], d.Field[1] - keyType, keyWire := g.GoType(d, keyField) - valType, valWire := g.GoType(d, valField) - keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) - - // We don't use stars, except for message-typed values. - // Message and enum types are the only two possibly foreign types used in maps, - // so record their use. They are not permitted as map keys. 
- keyType = strings.TrimPrefix(keyType, "*") - switch *valField.Type { - case descriptor.FieldDescriptorProto_TYPE_ENUM: - valType = strings.TrimPrefix(valType, "*") - g.RecordTypeUse(valField.GetTypeName()) - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.RecordTypeUse(valField.GetTypeName()) - default: - valType = strings.TrimPrefix(valType, "*") - } - - typename = fmt.Sprintf("map[%s]%s", keyType, valType) - mapFieldTypes[field] = typename // record for the getter generation - - tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) - } - } - - fieldDeprecated := "" - if field.GetOptions().GetDeprecated() { - fieldDeprecated = deprecationComment - } - - dvalue := g.getterDefault(field, goTypeName) - if oneof { - tname := goTypeName + "_" + fieldName - // It is possible for this to collide with a message or enum - // nested in this message. Check for collisions. - for { - ok := true - for _, desc := range message.nested { - if CamelCaseSlice(desc.TypeName()) == tname { - ok = false - break - } - } - for _, enum := range message.enums { - if CamelCaseSlice(enum.TypeName()) == tname { - ok = false - break - } - } - if !ok { - tname += "_" - continue - } - break - } - - oneofField := oFields[*field.OneofIndex] - tag := "protobuf:" + g.goTag(message, field, wiretype) - sf := oneofSubField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), - }, - protoTypeName: field.GetTypeName(), - fieldNumber: int(*field.Number), - protoType: *field.Type, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - oneofTypeName: tname, - deprecated: fieldDeprecated, - } - oneofField.subFields = append(oneofField.subFields, &sf) - g.RecordTypeUse(field.GetTypeName()) - continue - } - - fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) - c, ok := g.makeComments(fieldFullPath) - if ok { - c += "\n" - } - rf := simpleField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fieldFullPath, - }, - protoTypeName: field.GetTypeName(), - protoType: *field.Type, - deprecated: fieldDeprecated, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - comment: c, - } - var pf topLevelField = &rf - - topLevelFields = append(topLevelFields, pf) - g.RecordTypeUse(field.GetTypeName()) - } - - mc := &msgCtx{ - goName: goTypeName, - message: message, - } - - g.generateMessageStruct(mc, topLevelFields) - g.P() - g.generateCommonMethods(mc) - g.P() - g.generateDefaultConstants(mc, topLevelFields) - g.P() - g.generateGetters(mc, topLevelFields) - g.P() - g.generateSetters(mc, topLevelFields) - g.P() - g.generateOneofFuncs(mc, topLevelFields) - g.P() - - var oneofTypes []string - for _, f := range topLevelFields { - if of, ok := f.(*oneofField); ok { - for _, osf := range of.subFields { - oneofTypes = append(oneofTypes, osf.oneofTypeName) - } - } - } - - opts := message.Options - ms := &messageSymbol{ - sym: goTypeName, - hasExtensions: len(message.ExtensionRange) > 0, - isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), - oneofTypes: oneofTypes, - } - g.file.addExport(message, ms) - - for _, ext := range message.ext { - g.generateExtension(ext) - } - - fullName := strings.Join(message.TypeName(), ".") - if g.file.Package != nil { - fullName = *g.file.Package + "." 
+ fullName - } - - g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName) - // Register types for native map types. - for _, k := range mapFieldKeys(mapFieldTypes) { - fullName := strings.TrimPrefix(*k.TypeName, ".") - g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) - } - -} - -type byTypeName []*descriptor.FieldDescriptorProto - -func (a byTypeName) Len() int { return len(a) } -func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } - -// mapFieldKeys returns the keys of m in a consistent order. -func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { - keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Sort(byTypeName(keys)) - return keys -} - -var escapeChars = [256]byte{ - 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', -} - -// unescape reverses the "C" escaping that protoc does for default values of bytes fields. -// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape -// sequences are conveyed, unmodified, into the decoded result. -func unescape(s string) string { - // NB: Sadly, we can't use strconv.Unquote because protoc will escape both - // single and double quotes, but strconv.Unquote only allows one or the - // other (based on actual surrounding quotes of its input argument). - - var out []byte - for len(s) > 0 { - // regular character, or too short to be valid escape - if s[0] != '\\' || len(s) < 2 { - out = append(out, s[0]) - s = s[1:] - } else if c := escapeChars[s[1]]; c != 0 { - // escape sequence - out = append(out, c) - s = s[2:] - } else if s[1] == 'x' || s[1] == 'X' { - // hex escape, e.g. "\x80 - if len(s) < 4 { - // too short to be valid - out = append(out, s[:2]...) - s = s[2:] - continue - } - v, err := strconv.ParseUint(s[2:4], 16, 8) - if err != nil { - out = append(out, s[:4]...) - } else { - out = append(out, byte(v)) - } - s = s[4:] - } else if '0' <= s[1] && s[1] <= '7' { - // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" - // so consume up to 2 more bytes or up to end-of-string - n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) - if n > 3 { - n = 3 - } - v, err := strconv.ParseUint(s[1:1+n], 8, 8) - if err != nil { - out = append(out, s[:1+n]...) - } else { - out = append(out, byte(v)) - } - s = s[1+n:] - } else { - // bad escape, just propagate the slash as-is - out = append(out, s[0]) - s = s[1:] - } - } - - return string(out) -} - -func (g *Generator) generateExtension(ext *ExtensionDescriptor) { - ccTypeName := ext.DescName() - - extObj := g.ObjectNamed(*ext.Extendee) - var extDesc *Descriptor - if id, ok := extObj.(*ImportedDescriptor); ok { - // This is extending a publicly imported message. - // We need the underlying type for goTag. 
- extDesc = id.o.(*Descriptor) - } else { - extDesc = extObj.(*Descriptor) - } - extendedType := "*" + g.TypeName(extObj) // always use the original - field := ext.FieldDescriptorProto - fieldType, wireType := g.GoType(ext.parent, field) - tag := g.goTag(extDesc, field, wireType) - g.RecordTypeUse(*ext.Extendee) - if n := ext.FieldDescriptorProto.TypeName; n != nil { - // foreign extension type - g.RecordTypeUse(*n) - } - - typeName := ext.TypeName() - - // Special case for proto2 message sets: If this extension is extending - // proto2.bridge.MessageSet, and its final name component is "message_set_extension", - // then drop that last component. - // - // TODO: This should be implemented in the text formatter rather than the generator. - // In addition, the situation for when to apply this special case is implemented - // differently in other languages: - // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 - if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { - typeName = typeName[:len(typeName)-1] - } - - // For text formatting, the package must be exactly what the .proto file declares, - // ignoring overrides such as the go_package option, and with no dot/underscore mapping. - extName := strings.Join(typeName, ".") - if g.file.Package != nil { - extName = *g.file.Package + "." + extName - } - - g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") - g.P("ExtendedType: (", extendedType, ")(nil),") - g.P("ExtensionType: (", fieldType, ")(nil),") - g.P("Field: ", field.Number, ",") - g.P(`Name: "`, extName, `",`) - g.P("Tag: ", tag, ",") - g.P(`Filename: "`, g.file.GetName(), `",`) - - g.P("}") - g.P() - - g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) - - g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) -} - -func (g *Generator) generateInitFunction() { - if len(g.init) == 0 { - return - } - g.P("func init() {") - for _, l := range g.init { - g.P(l) - } - g.P("}") - g.init = nil -} - -func (g *Generator) generateFileDescriptor(file *FileDescriptor) { - // Make a copy and trim source_code_info data. - // TODO: Trim this more when we know exactly what we need. - pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) - pb.SourceCodeInfo = nil - - b, err := proto.Marshal(pb) - if err != nil { - g.Fail(err.Error()) - } - - var buf bytes.Buffer - w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) - w.Write(b) - w.Close() - b = buf.Bytes() - - v := file.VarName() - g.P() - g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") - g.P("var ", v, " = []byte{") - g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") - for len(b) > 0 { - n := 16 - if n > len(b) { - n = len(b) - } - - s := "" - for _, c := range b[:n] { - s += fmt.Sprintf("0x%02x,", c) - } - g.P(s) - - b = b[n:] - } - g.P("}") -} - -func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { - // // We always print the full (proto-world) package name here. - pkg := enum.File().GetPackage() - if pkg != "" { - pkg += "." - } - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. - ccTypeName := CamelCaseSlice(typeName) - g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) -} - -// And now lots of helper functions. - -// Is c an ASCII lower-case letter? 
-func isASCIILower(c byte) bool { - return 'a' <= c && c <= 'z' -} - -// Is c an ASCII digit? -func isASCIIDigit(c byte) bool { - return '0' <= c && c <= '9' -} - -// CamelCase returns the CamelCased name. -// If there is an interior underscore followed by a lower case letter, -// drop the underscore and convert the letter to upper case. -// There is a remote possibility of this rewrite causing a name collision, -// but it's so remote we're prepared to pretend it's nonexistent - since the -// C++ generator lowercases names, it's extremely unlikely to have two fields -// with different capitalizations. -// In short, _my_field_name_2 becomes XMyFieldName_2. -func CamelCase(s string) string { - if s == "" { - return "" - } - t := make([]byte, 0, 32) - i := 0 - if s[0] == '_' { - // Need a capital letter; drop the '_'. - t = append(t, 'X') - i++ - } - // Invariant: if the next letter is lower case, it must be converted - // to upper case. - // That is, we process a word at a time, where words are marked by _ or - // upper case letter. Digits are treated as words. - for ; i < len(s); i++ { - c := s[i] - if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { - continue // Skip the underscore in s. - } - if isASCIIDigit(c) { - t = append(t, c) - continue - } - // Assume we have a letter now - if not, it's a bogus identifier. - // The next word is a sequence of characters that must start upper case. - if isASCIILower(c) { - c ^= ' ' // Make it a capital letter. - } - t = append(t, c) // Guaranteed not lower case. - // Accept lower case sequence that follows. - for i+1 < len(s) && isASCIILower(s[i+1]) { - i++ - t = append(t, s[i]) - } - } - return string(t) -} - -// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to -// be joined with "_". -func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } - -// dottedSlice turns a sliced name into a dotted name. -func dottedSlice(elem []string) string { return strings.Join(elem, ".") } - -// Is this field optional? -func isOptional(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL -} - -// Is this field required? -func isRequired(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED -} - -// Is this field repeated? -func isRepeated(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED -} - -// Is this field a scalar numeric type? 
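// [Editor's aside -- illustrative only, not part of the diff.] Sample
// mappings produced by the CamelCase helper above:
//
//     "field_name"       -> "FieldName"
//     "foo_bar_2"        -> "FooBar_2"
//     "_my_field_name_2" -> "XMyFieldName_2"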
-func isScalar(field *descriptor.FieldDescriptorProto) bool { - if field.Type == nil { - return false - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE, - descriptor.FieldDescriptorProto_TYPE_FLOAT, - descriptor.FieldDescriptorProto_TYPE_INT64, - descriptor.FieldDescriptorProto_TYPE_UINT64, - descriptor.FieldDescriptorProto_TYPE_INT32, - descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_BOOL, - descriptor.FieldDescriptorProto_TYPE_UINT32, - descriptor.FieldDescriptorProto_TYPE_ENUM, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} - -// badToUnderscore is the mapping function used to generate Go names from package names, -// which can be dotted in the input .proto file. It replaces non-identifier characters such as -// dot or dash with underscore. -func badToUnderscore(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { - return r - } - return '_' -} - -// baseName returns the last path element of the name, with the last dotted suffix removed. -func baseName(name string) string { - // First, find the last element - if i := strings.LastIndex(name, "/"); i >= 0 { - name = name[i+1:] - } - // Now drop the suffix - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[0:i] - } - return name -} - -// The SourceCodeInfo message describes the location of elements of a parsed -// .proto file by way of a "path", which is a sequence of integers that -// describe the route from a FileDescriptorProto to the relevant submessage. -// The path alternates between a field number of a repeated field, and an index -// into that repeated field. The constants below define the field numbers that -// are used. -// -// See descriptor.proto for more information about this. -const ( - // tag numbers in FileDescriptorProto - packagePath = 2 // package - messagePath = 4 // message_type - enumPath = 5 // enum_type - // tag numbers in DescriptorProto - messageFieldPath = 2 // field - messageMessagePath = 3 // nested_type - messageEnumPath = 4 // enum_type - messageOneofPath = 8 // oneof_decl - // tag numbers in EnumDescriptorProto - enumValuePath = 2 // value -) - -var supportTypeAliases bool - -func init() { - for _, tag := range build.Default.ReleaseTags { - if tag == "go1.9" { - supportTypeAliases = true - return - } - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go deleted file mode 100644 index a9b61036cc..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go +++ /dev/null @@ -1,117 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package remap handles tracking the locations of Go tokens in a source text -across a rewrite by the Go formatter. -*/ -package remap - -import ( - "fmt" - "go/scanner" - "go/token" -) - -// A Location represents a span of byte offsets in the source text. -type Location struct { - Pos, End int // End is exclusive -} - -// A Map represents a mapping between token locations in an input source text -// and locations in the correspnding output text. -type Map map[Location]Location - -// Find reports whether the specified span is recorded by m, and if so returns -// the new location it was mapped to. If the input span was not found, the -// returned location is the same as the input. -func (m Map) Find(pos, end int) (Location, bool) { - key := Location{ - Pos: pos, - End: end, - } - if loc, ok := m[key]; ok { - return loc, true - } - return key, false -} - -func (m Map) add(opos, oend, npos, nend int) { - m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} -} - -// Compute constructs a location mapping from input to output. An error is -// reported if any of the tokens of output cannot be mapped. -func Compute(input, output []byte) (Map, error) { - itok := tokenize(input) - otok := tokenize(output) - if len(itok) != len(otok) { - return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) - } - m := make(Map) - for i, ti := range itok { - to := otok[i] - if ti.Token != to.Token { - return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) - } - m.add(ti.pos, ti.end, to.pos, to.end) - } - return m, nil -} - -// tokinfo records the span and type of a source token. 
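// [Editor's aside -- illustrative only, not part of the diff.] A minimal
// sketch of how the remap package above is meant to be used; input and output
// are assumed to hold the same Go source before and after a gofmt-style
// rewrite, and pos/end are byte offsets of a token in input:
//
//     m, err := remap.Compute(input, output)
//     if err != nil {
//         // token streams differ; locations cannot be carried across
//     } else if loc, ok := m.Find(pos, end); ok {
//         // loc.Pos and loc.End locate the same token in output
//     }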
-type tokinfo struct { - pos, end int - token.Token -} - -func tokenize(src []byte) []tokinfo { - fs := token.NewFileSet() - var s scanner.Scanner - s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) - var info []tokinfo - for { - pos, next, lit := s.Scan() - switch next { - case token.SEMICOLON: - continue - } - info = append(info, tokinfo{ - pos: int(pos - 1), - end: int(pos + token.Pos(len(lit)) - 1), - Token: next, - }) - if next == token.EOF { - break - } - } - return info -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go deleted file mode 100644 index 61bfc10e02..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ /dev/null @@ -1,369 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/compiler/plugin.proto - -/* -Package plugin_go is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/compiler/plugin.proto - -It has these top-level messages: - Version - CodeGeneratorRequest - CodeGeneratorResponse -*/ -package plugin_go - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of protocol compiler. -type Version struct { - Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. 
- Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Version) Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetMajor() int32 { - if m != nil && m.Major != nil { - return *m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil && m.Minor != nil { - return *m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil && m.Patch != nil { - return *m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil && m.Suffix != nil { - return *m.Suffix - } - return "" -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -type CodeGeneratorRequest struct { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` - // The generator parameter passed on the command-line. - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` - // The version number of protocol compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } -func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorRequest) ProtoMessage() {} -func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) -} -func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) -} -func (m *CodeGeneratorRequest) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorRequest.Size(m) -} -func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo - -func (m *CodeGeneratorRequest) GetFileToGenerate() []string { - if m != nil { - return m.FileToGenerate - } - return nil -} - -func (m *CodeGeneratorRequest) GetParameter() string { - if m != nil && m.Parameter != nil { - return *m.Parameter - } - return "" -} - -func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { - if m != nil { - return m.ProtoFile - } - return nil -} - -func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -type CodeGeneratorResponse struct { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. 
- Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } -func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse) ProtoMessage() {} -func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) -} -func (m *CodeGeneratorResponse) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse.Size(m) -} -func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo - -func (m *CodeGeneratorResponse) GetError() string { - if m != nil && m.Error != nil { - return *m.Error - } - return "" -} - -func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { - if m != nil { - return m.File - } - return nil -} - -// Represents a single generated file. -type CodeGeneratorResponse_File struct { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. 
- // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. - // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` - // The file contents. - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } -func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} -func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } -func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) -} -func (m *CodeGeneratorResponse_File) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) -} -func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo - -func (m *CodeGeneratorResponse_File) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { - if m != nil && m.InsertionPoint != nil { - return *m.InsertionPoint - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetContent() string { - if m != nil && m.Content != nil { - return *m.Content - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") - proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") - proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") - proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") -} - -func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", 
fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, - 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, - 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, - 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, - 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, - 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, - 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, - 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, - 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, - 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, - 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, - 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, - 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, - 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, - 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, - 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, - 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, - 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, - 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, - 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, - 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, - 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, - 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, - 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, - 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, - 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, - 0x00, -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden deleted file mode 100644 index 8953d0ff82..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/protobuf/compiler/plugin.proto -// DO NOT EDIT! - -package google_protobuf_compiler - -import proto "github.com/golang/protobuf/proto" -import "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference proto and math imports to suppress error if they are not otherwise used. 
-var _ = proto.GetString -var _ = math.Inf - -type CodeGeneratorRequest struct { - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } -func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorRequest) ProtoMessage() {} - -func (this *CodeGeneratorRequest) GetParameter() string { - if this != nil && this.Parameter != nil { - return *this.Parameter - } - return "" -} - -type CodeGeneratorResponse struct { - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } -func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse) ProtoMessage() {} - -func (this *CodeGeneratorResponse) GetError() string { - if this != nil && this.Error != nil { - return *this.Error - } - return "" -} - -type CodeGeneratorResponse_File struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } -func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} - -func (this *CodeGeneratorResponse_File) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { - if this != nil && this.InsertionPoint != nil { - return *this.InsertionPoint - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetContent() string { - if this != nil && this.Content != nil { - return *this.Content - } - return "" -} - -func init() { -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto deleted file mode 100644 index 5b5574529e..0000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto +++ /dev/null @@ -1,167 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// -// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to -// change. -// -// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is -// just a program that reads a CodeGeneratorRequest from stdin and writes a -// CodeGeneratorResponse to stdout. -// -// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead -// of dealing with the raw protocol defined here. -// -// A plugin executable needs only to be placed somewhere in the path. The -// plugin should be named "protoc-gen-$NAME", and will then be used when the -// flag "--${NAME}_out" is passed to protoc. - -syntax = "proto2"; -package google.protobuf.compiler; -option java_package = "com.google.protobuf.compiler"; -option java_outer_classname = "PluginProtos"; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; - -import "google/protobuf/descriptor.proto"; - -// The version number of protocol compiler. -message Version { - optional int32 major = 1; - optional int32 minor = 2; - optional int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - optional string suffix = 4; -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -message CodeGeneratorRequest { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - repeated string file_to_generate = 1; - - // The generator parameter passed on the command-line. - optional string parameter = 2; - - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. 
- repeated FileDescriptorProto proto_file = 15; - - // The version number of protocol compiler. - optional Version compiler_version = 3; - -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -message CodeGeneratorResponse { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - optional string error = 1; - - // Represents a single generated file. - message File { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - optional string name = 1; - - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. - // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. 
- // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - optional string insertion_point = 2; - - // The file contents. - optional string content = 15; - } - repeated File file = 15; -} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go index 8040dcb0c1..3fd1b0b84b 100644 --- a/vendor/github.com/googleapis/gax-go/v2/gax.go +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -36,4 +36,4 @@ package gax // Version specifies the gax-go version being used. -const Version = "2.0.3" +const Version = "2.0.4" diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod index 0c91100b9d..9cdfaf4475 100644 --- a/vendor/github.com/googleapis/gax-go/v2/go.mod +++ b/vendor/github.com/googleapis/gax-go/v2/go.mod @@ -1,5 +1,3 @@ module github.com/googleapis/gax-go/v2 -go 1.12 - require google.golang.org/grpc v1.19.0 diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt deleted file mode 100644 index 364516251b..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Gengo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
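The plugin contract spelled out in the deleted plugin.proto above is small: protoc writes a serialized CodeGeneratorRequest to the plugin's stdin, the plugin writes a serialized CodeGeneratorResponse to stdout, and the binary is discovered on PATH as protoc-gen-$NAME when --${NAME}_out is passed. As a rough illustrative sketch only (not part of this change), a minimal plugin built on the vendored plugin_go types could look like the following; the "echo" plugin name, output file suffix, and file contents are made-up placeholders:

package main

import (
	"io/ioutil"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc hands the plugin an encoded CodeGeneratorRequest on stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		// An unparseable request is a protoc-side problem: report on stderr,
		// exit non-zero (per the protocol comments above).
		os.Exit(1)
	}

	// Emit one placeholder output file per explicitly listed .proto file.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".echo.txt"), // hypothetical output name
			Content: proto.String("// generated from " + name),
		})
	}

	// Problems in the .proto inputs themselves would instead be reported via
	// resp.Error with a zero exit status; the encoded response goes to stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		os.Exit(1)
	}
	os.Stdout.Write(out)
}

With the binary installed on PATH under the hypothetical name protoc-gen-echo, it would be invoked as, e.g., `protoc --echo_out=. foo.proto`.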
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel deleted file mode 100644 index 76cafe6ec7..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -package(default_visibility = ["//visibility:public"]) - -proto_library( - name = "internal_proto", - srcs = ["stream_chunk.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -go_proto_library( - name = "internal_go_proto", - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", - proto = ":internal_proto", -) - -go_library( - name = "go_default_library", - embed = [":internal_go_proto"], - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go deleted file mode 100644 index 8858f06904..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: internal/stream_chunk.proto - -package internal - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
-type StreamError struct { - GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` - HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` - Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamError) Reset() { *m = StreamError{} } -func (m *StreamError) String() string { return proto.CompactTextString(m) } -func (*StreamError) ProtoMessage() {} -func (*StreamError) Descriptor() ([]byte, []int) { - return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0} -} -func (m *StreamError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamError.Unmarshal(m, b) -} -func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) -} -func (dst *StreamError) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamError.Merge(dst, src) -} -func (m *StreamError) XXX_Size() int { - return xxx_messageInfo_StreamError.Size(m) -} -func (m *StreamError) XXX_DiscardUnknown() { - xxx_messageInfo_StreamError.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamError proto.InternalMessageInfo - -func (m *StreamError) GetGrpcCode() int32 { - if m != nil { - return m.GrpcCode - } - return 0 -} - -func (m *StreamError) GetHttpCode() int32 { - if m != nil { - return m.HttpCode - } - return 0 -} - -func (m *StreamError) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *StreamError) GetHttpStatus() string { - if m != nil { - return m.HttpStatus - } - return "" -} - -func (m *StreamError) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") -} - -func init() { - proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7) -} - -var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{ - // 223 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30, - 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23, - 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78, - 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce, - 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2, - 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a, - 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f, - 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54, - 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7, - 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5, - 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9, - 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 
0x7a, 0x59, 0xac, - 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18, - 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto deleted file mode 100644 index 55f42ce63e..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; -package grpc.gateway.runtime; -option go_package = "internal"; - -import "google/protobuf/any.proto"; - -// StreamError is a response type which is returned when -// streaming rpc returns an error. -message StreamError { - int32 grpc_code = 1; - int32 http_code = 2; - string message = 3; - string http_status = 4; - repeated google.protobuf.Any details = 5; -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index c99f83e585..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,80 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "proto_errors.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - deps = [ - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "context_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_test.go", - "pattern_test.go", - "query_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//examples/proto/examplepb:go_default_library", - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - 
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//:go_default_library", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go deleted file mode 100644 index 896057e1e1..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ /dev/null @@ -1,210 +0,0 @@ -package runtime - -import ( - "context" - "encoding/base64" - "fmt" - "net" - "net/http" - "net/textproto" - "strconv" - "strings" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// MetadataHeaderPrefix is the http prefix that represents custom metadata -// parameters to or from a gRPC call. -const MetadataHeaderPrefix = "Grpc-Metadata-" - -// MetadataPrefix is prepended to permanent HTTP header keys (as specified -// by the IANA) when added to the gRPC context. -const MetadataPrefix = "grpcgateway-" - -// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to -// HTTP headers in a response handled by grpc-gateway -const MetadataTrailerPrefix = "Grpc-Trailer-" - -const metadataGrpcTimeout = "Grpc-Timeout" -const metadataHeaderBinarySuffix = "-Bin" - -const xForwardedFor = "X-Forwarded-For" -const xForwardedHost = "X-Forwarded-Host" - -var ( - // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound - // header isn't present. If the value is 0 the sent `context` will not have a timeout. - DefaultContextTimeout = 0 * time.Second -) - -func decodeBinHeader(v string) ([]byte, error) { - if len(v)%4 == 0 { - // Input was padded, or padding was not necessary. - return base64.StdEncoding.DecodeString(v) - } - return base64.RawStdEncoding.DecodeString(v) -} - -/* -AnnotateContext adds context information such as metadata from the request. - -At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", -except that the forwarded destination is not another HTTP service but rather -a gRPC service. -*/ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - var pairs []string - timeout := DefaultContextTimeout - if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { - var err error - timeout, err = timeoutDecode(tm) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) - } - } - - for key, vals := range req.Header { - for _, val := range vals { - key = textproto.CanonicalMIMEHeaderKey(key) - // For backwards-compatibility, pass through 'authorization' header with no prefix. - if key == "Authorization" { - pairs = append(pairs, "authorization", val) - } - if h, ok := mux.incomingHeaderMatcher(key); ok { - // Handles "-bin" metadata in grpc, since grpc will do another base64 - // encode before sending to server, we need to decode it first. 
- if strings.HasSuffix(key, metadataHeaderBinarySuffix) { - b, err := decodeBinHeader(val) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) - } - - val = string(b) - } - pairs = append(pairs, h, val) - } - } - } - if host := req.Header.Get(xForwardedHost); host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), host) - } else if req.Host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) - } - - if addr := req.RemoteAddr; addr != "" { - if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } - } else { - grpclog.Infof("invalid remote addr: %s", addr) - } - } - - if timeout != 0 { - ctx, _ = context.WithTimeout(ctx, timeout) - } - if len(pairs) == 0 { - return ctx, nil - } - md := metadata.Pairs(pairs...) - for _, mda := range mux.metadataAnnotators { - md = metadata.Join(md, mda(ctx, req)) - } - return metadata.NewOutgoingContext(ctx, md), nil -} - -// ServerMetadata consists of metadata sent from gRPC server. -type ServerMetadata struct { - HeaderMD metadata.MD - TrailerMD metadata.MD -} - -type serverMetadataKey struct{} - -// NewServerMetadataContext creates a new context with ServerMetadata -func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { - return context.WithValue(ctx, serverMetadataKey{}, md) -} - -// ServerMetadataFromContext returns the ServerMetadata in ctx -func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { - md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) - return -} - -func timeoutDecode(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("timeout string is too short: %q", s) - } - d, ok := timeoutUnitToDuration(s[size-1]) - if !ok { - return 0, fmt.Errorf("timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - return d * time.Duration(t), nil -} - -func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { - switch u { - case 'H': - return time.Hour, true - case 'M': - return time.Minute, true - case 'S': - return time.Second, true - case 'm': - return time.Millisecond, true - case 'u': - return time.Microsecond, true - case 'n': - return time.Nanosecond, true - default: - } - return -} - -// isPermanentHTTPHeader checks whether hdr belongs to the list of -// permenant request headers maintained by IANA. 
-// http://www.iana.org/assignments/message-headers/message-headers.xml -func isPermanentHTTPHeader(hdr string) bool { - switch hdr { - case - "Accept", - "Accept-Charset", - "Accept-Language", - "Accept-Ranges", - "Authorization", - "Cache-Control", - "Content-Type", - "Cookie", - "Date", - "Expect", - "From", - "Host", - "If-Match", - "If-Modified-Since", - "If-None-Match", - "If-Schedule-Tag-Match", - "If-Unmodified-Since", - "Max-Forwards", - "Origin", - "Pragma", - "Referer", - "User-Agent", - "Via", - "Warning": - return true - } - return false -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go deleted file mode 100644 index a5b3bd6a79..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go +++ /dev/null @@ -1,312 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "strconv" - "strings" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/golang/protobuf/ptypes/wrappers" -) - -// String just returns the given string. -// It is just for compatibility to other types. -func String(val string) (string, error) { - return val, nil -} - -// StringSlice converts 'val' where individual strings are separated by -// 'sep' into a string slice. -func StringSlice(val, sep string) ([]string, error) { - return strings.Split(val, sep), nil -} - -// Bool converts the given string representation of a boolean value into bool. -func Bool(val string) (bool, error) { - return strconv.ParseBool(val) -} - -// BoolSlice converts 'val' where individual booleans are separated by -// 'sep' into a bool slice. -func BoolSlice(val, sep string) ([]bool, error) { - s := strings.Split(val, sep) - values := make([]bool, len(s)) - for i, v := range s { - value, err := Bool(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Float64 converts the given string representation into representation of a floating point number into float64. -func Float64(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -// Float64Slice converts 'val' where individual floating point numbers are separated by -// 'sep' into a float64 slice. -func Float64Slice(val, sep string) ([]float64, error) { - s := strings.Split(val, sep) - values := make([]float64, len(s)) - for i, v := range s { - value, err := Float64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Float32 converts the given string representation of a floating point number into float32. -func Float32(val string) (float32, error) { - f, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// Float32Slice converts 'val' where individual floating point numbers are separated by -// 'sep' into a float32 slice. -func Float32Slice(val, sep string) ([]float32, error) { - s := strings.Split(val, sep) - values := make([]float32, len(s)) - for i, v := range s { - value, err := Float32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int64 converts the given string representation of an integer into int64. -func Int64(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -// Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. 
-func Int64Slice(val, sep string) ([]int64, error) { - s := strings.Split(val, sep) - values := make([]int64, len(s)) - for i, v := range s { - value, err := Int64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int32 converts the given string representation of an integer into int32. -func Int32(val string) (int32, error) { - i, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. -func Int32Slice(val, sep string) ([]int32, error) { - s := strings.Split(val, sep) - values := make([]int32, len(s)) - for i, v := range s { - value, err := Int32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint64 converts the given string representation of an integer into uint64. -func Uint64(val string) (uint64, error) { - return strconv.ParseUint(val, 0, 64) -} - -// Uint64Slice converts 'val' where individual integers are separated by -// 'sep' into a uint64 slice. -func Uint64Slice(val, sep string) ([]uint64, error) { - s := strings.Split(val, sep) - values := make([]uint64, len(s)) - for i, v := range s { - value, err := Uint64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint32 converts the given string representation of an integer into uint32. -func Uint32(val string) (uint32, error) { - i, err := strconv.ParseUint(val, 0, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// Uint32Slice converts 'val' where individual integers are separated by -// 'sep' into a uint32 slice. -func Uint32Slice(val, sep string) ([]uint32, error) { - s := strings.Split(val, sep) - values := make([]uint32, len(s)) - for i, v := range s { - value, err := Uint32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Bytes converts the given string representation of a byte sequence into a slice of bytes -// A bytes sequence is encoded in URL-safe base64 without padding -func Bytes(val string) ([]byte, error) { - b, err := base64.StdEncoding.DecodeString(val) - if err != nil { - b, err = base64.URLEncoding.DecodeString(val) - if err != nil { - return nil, err - } - } - return b, nil -} - -// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. -func BytesSlice(val, sep string) ([][]byte, error) { - s := strings.Split(val, sep) - values := make([][]byte, len(s)) - for i, v := range s { - value, err := Bytes(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. -func Timestamp(val string) (*timestamp.Timestamp, error) { - var r *timestamp.Timestamp - err := jsonpb.UnmarshalString(val, r) - return r, err -} - -// Duration converts the given string into a timestamp.Duration. -func Duration(val string) (*duration.Duration, error) { - var r *duration.Duration - err := jsonpb.UnmarshalString(val, r) - return r, err -} - -// Enum converts the given string into an int32 that should be type casted into the -// correct enum proto type. 
-func Enum(val string, enumValMap map[string]int32) (int32, error) { - e, ok := enumValMap[val] - if ok { - return e, nil - } - - i, err := Int32(val) - if err != nil { - return 0, fmt.Errorf("%s is not valid", val) - } - for _, v := range enumValMap { - if v == i { - return i, nil - } - } - return 0, fmt.Errorf("%s is not valid", val) -} - -// EnumSlice converts 'val' where individual enums are separated by 'sep' -// into a int32 slice. Each individual int32 should be type casted into the -// correct enum proto type. -func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { - s := strings.Split(val, sep) - values := make([]int32, len(s)) - for i, v := range s { - value, err := Enum(v, enumValMap) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -/* - Support fot google.protobuf.wrappers on top of primitive types -*/ - -// StringValue well-known type support as wrapper around string type -func StringValue(val string) (*wrappers.StringValue, error) { - return &wrappers.StringValue{Value: val}, nil -} - -// FloatValue well-known type support as wrapper around float32 type -func FloatValue(val string) (*wrappers.FloatValue, error) { - parsedVal, err := Float32(val) - return &wrappers.FloatValue{Value: parsedVal}, err -} - -// DoubleValue well-known type support as wrapper around float64 type -func DoubleValue(val string) (*wrappers.DoubleValue, error) { - parsedVal, err := Float64(val) - return &wrappers.DoubleValue{Value: parsedVal}, err -} - -// BoolValue well-known type support as wrapper around bool type -func BoolValue(val string) (*wrappers.BoolValue, error) { - parsedVal, err := Bool(val) - return &wrappers.BoolValue{Value: parsedVal}, err -} - -// Int32Value well-known type support as wrapper around int32 type -func Int32Value(val string) (*wrappers.Int32Value, error) { - parsedVal, err := Int32(val) - return &wrappers.Int32Value{Value: parsedVal}, err -} - -// UInt32Value well-known type support as wrapper around uint32 type -func UInt32Value(val string) (*wrappers.UInt32Value, error) { - parsedVal, err := Uint32(val) - return &wrappers.UInt32Value{Value: parsedVal}, err -} - -// Int64Value well-known type support as wrapper around int64 type -func Int64Value(val string) (*wrappers.Int64Value, error) { - parsedVal, err := Int64(val) - return &wrappers.Int64Value{Value: parsedVal}, err -} - -// UInt64Value well-known type support as wrapper around uint64 type -func UInt64Value(val string) (*wrappers.UInt64Value, error) { - parsedVal, err := Uint64(val) - return &wrappers.UInt64Value{Value: parsedVal}, err -} - -// BytesValue well-known type support as wrapper around bytes[] type -func BytesValue(val string) (*wrappers.BytesValue, error) { - parsedVal, err := Bytes(val) - return &wrappers.BytesValue{Value: parsedVal}, err -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go deleted file mode 100644 index b6e5ddf7a9..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package runtime contains runtime helper functions used by -servers which protoc-gen-grpc-gateway generates. 
-*/ -package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go deleted file mode 100644 index 41d54ef916..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ /dev/null @@ -1,145 +0,0 @@ -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. -// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -func HTTPStatusFromCode(code codes.Code) int { - switch code { - case codes.OK: - return http.StatusOK - case codes.Canceled: - return http.StatusRequestTimeout - case codes.Unknown: - return http.StatusInternalServerError - case codes.InvalidArgument: - return http.StatusBadRequest - case codes.DeadlineExceeded: - return http.StatusGatewayTimeout - case codes.NotFound: - return http.StatusNotFound - case codes.AlreadyExists: - return http.StatusConflict - case codes.PermissionDenied: - return http.StatusForbidden - case codes.Unauthenticated: - return http.StatusUnauthorized - case codes.ResourceExhausted: - return http.StatusTooManyRequests - case codes.FailedPrecondition: - return http.StatusPreconditionFailed - case codes.Aborted: - return http.StatusConflict - case codes.OutOfRange: - return http.StatusBadRequest - case codes.Unimplemented: - return http.StatusNotImplemented - case codes.Internal: - return http.StatusInternalServerError - case codes.Unavailable: - return http.StatusServiceUnavailable - case codes.DataLoss: - return http.StatusInternalServerError - } - - grpclog.Infof("Unknown gRPC error code: %v", code) - return http.StatusInternalServerError -} - -var ( - // HTTPError replies to the request with the error. - // You can set a custom function to this variable to customize error format. - HTTPError = DefaultHTTPError - // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest - OtherErrorHandler = DefaultOtherErrorHandler -) - -type errorBody struct { - Error string `protobuf:"bytes,1,name=error" json:"error"` - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. - Message string `protobuf:"bytes,1,name=message" json:"message"` - Code int32 `protobuf:"varint,2,name=code" json:"code"` - Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` -} - -// Make this also conform to proto.Message for builtin JSONPb Marshaler -func (e *errorBody) Reset() { *e = errorBody{} } -func (e *errorBody) String() string { return proto.CompactTextString(e) } -func (*errorBody) ProtoMessage() {} - -// DefaultHTTPError is the default implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a JSON object, -// which contains a member whose key is "error" and whose value is err.Error(). 
-func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - const fallback = `{"error": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - pb := s.Proto() - contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - body := &errorBody{ - Error: s.Message(), - Message: s.Message(), - Code: int32(s.Code()), - Details: s.Proto().GetDetails(), - } - - buf, merr := marshaler.Marshal(body) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", body, merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. -// It simply writes a string representation of the given error into "w". -func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { - http.Error(w, msg, code) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go deleted file mode 100644 index e1cf7a9146..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go +++ /dev/null @@ -1,70 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" - "strings" - - "github.com/golang/protobuf/protoc-gen-go/generator" - "google.golang.org/genproto/protobuf/field_mask" -) - -// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. 
-func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) { - fm := &field_mask.FieldMask{} - var root interface{} - if err := json.NewDecoder(r).Decode(&root); err != nil { - if err == io.EOF { - return fm, nil - } - return nil, err - } - - queue := []fieldMaskPathItem{{node: root}} - for len(queue) > 0 { - // dequeue an item - item := queue[0] - queue = queue[1:] - - if m, ok := item.node.(map[string]interface{}); ok { - // if the item is an object, then enqueue all of its children - for k, v := range m { - queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v}) - } - } else if len(item.path) > 0 { - // otherwise, it's a leaf node so print its path - fm.Paths = append(fm.Paths, strings.Join(item.path, ".")) - } - } - - return fm, nil -} - -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask -type fieldMaskPathItem struct { - // the list of prior fields leading up to node - path []string - - // a generic decoded json object the current item to inspect for further path extraction - node interface{} -} - -// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic -// that's used for naming protobuf fields in Go. -func CamelCaseFieldMask(mask *field_mask.FieldMask) { - if mask == nil || mask.Paths == nil { - return - } - - var newPaths []string - for _, path := range mask.Paths { - lowerCasedParts := strings.Split(path, ".") - var camelCasedParts []string - for _, part := range lowerCasedParts { - camelCasedParts = append(camelCasedParts, generator.CamelCase(part)) - } - newPaths = append(newPaths, strings.Join(camelCasedParts, ".")) - } - - mask.Paths = newPaths -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go deleted file mode 100644 index 1fc63f7f58..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ /dev/null @@ -1,215 +0,0 @@ -package runtime - -import ( - "fmt" - "io" - "net/http" - "net/textproto" - - "context" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/internal" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// ForwardResponseStream forwards the stream from gRPC server to REST client. 
-func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Infof("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - http.Error(w, "unexpected error", http.StatusInternalServerError) - return - } - handleForwardResponseServerMetadata(w, mux, md) - - w.Header().Set("Transfer-Encoding", "chunked") - w.Header().Set("Content-Type", marshaler.ContentType()) - if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - - var delimiter []byte - if d, ok := marshaler.(Delimited); ok { - delimiter = d.Delimiter() - } else { - delimiter = []byte("\n") - } - - var wroteHeader bool - for { - resp, err := recv() - if err == io.EOF { - return - } - if err != nil { - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - - buf, err := marshaler.Marshal(streamChunk(resp, nil)) - if err != nil { - grpclog.Infof("Failed to marshal response chunk: %v", err) - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to send response chunk: %v", err) - return - } - wroteHeader = true - if _, err = w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) - return - } - f.Flush() - } -} - -func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { - for k, vs := range md.HeaderMD { - if h, ok := mux.outgoingHeaderMatcher(k); ok { - for _, v := range vs { - w.Header().Add(h, v) - } - } - } -} - -func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { - for k := range md.TrailerMD { - tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) - w.Header().Add("Trailer", tKey) - } -} - -func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { - for k, vs := range md.TrailerMD { - tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for _, v := range vs { - w.Header().Add(tKey, v) - } - } -} - -// responseBody interface contains method for getting field for marshaling to the response body -// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` -type responseBody interface { - XXX_ResponseBody() interface{} -} - -// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
-func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) - } - w.Header().Set("Content-Type", contentType) - - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { - buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) - } else { - buf, err = marshaler.Marshal(resp) - } - if err != nil { - grpclog.Infof("Marshal error: %v", err) - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - - if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { - if len(opts) == 0 { - return nil - } - for _, opt := range opts { - if err := opt(ctx, w, resp); err != nil { - grpclog.Infof("Error handling ForwardResponseOptions: %v", err) - return err - } - } - return nil -} - -func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) { - buf, merr := marshaler.Marshal(streamChunk(nil, err)) - if merr != nil { - grpclog.Infof("Failed to marshal an error: %v", merr) - return - } - if !wroteHeader { - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - w.WriteHeader(HTTPStatusFromCode(s.Code())) - } - if _, werr := w.Write(buf); werr != nil { - grpclog.Infof("Failed to notify error to client: %v", werr) - return - } -} - -func streamChunk(result proto.Message, err error) map[string]proto.Message { - if err != nil { - grpcCode := codes.Unknown - grpcMessage := err.Error() - var grpcDetails []*any.Any - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - grpcMessage = s.Message() - grpcDetails = s.Proto().GetDetails() - } - httpCode := HTTPStatusFromCode(grpcCode) - return map[string]proto.Message{ - "error": &internal.StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: grpcMessage, - HttpStatus: http.StatusText(httpCode), - Details: grpcDetails, - }, - } - } - if result == nil { - return streamChunk(nil, fmt.Errorf("empty response")) - } - return map[string]proto.Message{"result": result} -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go deleted file mode 100644 index f55285b5d6..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go +++ /dev/null @@ -1,43 +0,0 @@ -package runtime - -import ( - "google.golang.org/genproto/googleapis/api/httpbody" -) - -// 
SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler -func SetHTTPBodyMarshaler(serveMux *ServeMux) { - serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ - Marshaler: &JSONPb{OrigName: true}, - } -} - -// HTTPBodyMarshaler is a Marshaler which supports marshaling of a -// google.api.HttpBody message as the full response body if it is -// the actual message used as the response. If not, then this will -// simply fallback to the Marshaler specified as its default Marshaler. -type HTTPBodyMarshaler struct { - Marshaler -} - -// ContentType implementation to keep backwards compatability with marshal interface -func (h *HTTPBodyMarshaler) ContentType() string { - return h.ContentTypeFromMessage(nil) -} - -// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns -// its specified content type otherwise fall back to the default Marshaler. -func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { - if httpBody, ok := v.(*httpbody.HttpBody); ok { - return httpBody.GetContentType() - } - return h.Marshaler.ContentType() -} - -// Marshal marshals "v" by returning the body bytes if v is a -// google.api.HttpBody message, otherwise it falls back to the default Marshaler. -func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { - if httpBody, ok := v.(*httpbody.HttpBody); ok { - return httpBody.Data, nil - } - return h.Marshaler.Marshal(v) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go deleted file mode 100644 index f9d3a585a4..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go +++ /dev/null @@ -1,45 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON -// with the standard "encoding/json" package of Golang. -// Although it is generally faster for simple proto messages than JSONPb, -// it does not support advanced features of protobuf, e.g. map, oneof, .... -// -// The NewEncoder and NewDecoder types return *json.Encoder and -// *json.Decoder respectively. -type JSONBuiltin struct{} - -// ContentType always Returns "application/json". -func (*JSONBuiltin) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { - return json.NewDecoder(r) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. 
-func (j *JSONBuiltin) Delimiter() []byte { - return []byte("\n") -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go deleted file mode 100644 index 3530dddd0a..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ /dev/null @@ -1,242 +0,0 @@ -package runtime - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" -) - -// JSONPb is a Marshaler which marshals/unmarshals into/from JSON -// with the "github.com/golang/protobuf/jsonpb". -// It supports fully functionality of protobuf unlike JSONBuiltin. -// -// The NewDecoder method returns a DecoderWrapper, so the underlying -// *json.Decoder methods can be used. -type JSONPb jsonpb.Marshaler - -// ContentType always returns "application/json". -func (*JSONPb) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON. -func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - - var buf bytes.Buffer - if err := j.marshalTo(&buf, v); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - buf, err := j.marshalNonProtoField(v) - if err != nil { - return err - } - _, err = w.Write(buf) - return err - } - return (*jsonpb.Marshaler)(j).Marshal(w, p) -} - -var ( - // protoMessageType is stored to prevent constant lookup of the same type at runtime. - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() -) - -// marshalNonProto marshals a non-message field of a protobuf message. -// This function does not correctly marshals arbitrary data structure into JSON, -// but it is only capable of marshaling non-message field values of protobuf, -// i.e. primitive types, enums; pointers to primitives or enums; maps from -// integer/string types to primitives/enums/pointers to messages. 
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { - if v == nil { - return []byte("null"), nil - } - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return []byte("null"), nil - } - rv = rv.Elem() - } - - if rv.Kind() == reflect.Slice { - if rv.IsNil() { - if j.EmitDefaults { - return []byte("[]"), nil - } - return []byte("null"), nil - } - - if rv.Type().Elem().Implements(protoMessageType) { - var buf bytes.Buffer - err := buf.WriteByte('[') - if err != nil { - return nil, err - } - for i := 0; i < rv.Len(); i++ { - if i != 0 { - err = buf.WriteByte(',') - if err != nil { - return nil, err - } - } - if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { - return nil, err - } - } - err = buf.WriteByte(']') - if err != nil { - return nil, err - } - - return buf.Bytes(), nil - } - } - - if rv.Kind() == reflect.Map { - m := make(map[string]*json.RawMessage) - for _, k := range rv.MapKeys() { - buf, err := j.Marshal(rv.MapIndex(k).Interface()) - if err != nil { - return nil, err - } - m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) - } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } - return json.Marshal(m) - } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { - return json.Marshal(enum.String()) - } - return json.Marshal(rv.Interface()) -} - -// Unmarshal unmarshals JSON "data" into "v" -func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONPb) NewDecoder(r io.Reader) Decoder { - d := json.NewDecoder(r) - return DecoderWrapper{Decoder: d} -} - -// DecoderWrapper is a wrapper around a *json.Decoder that adds -// support for protos to the Decode method. -type DecoderWrapper struct { - *json.Decoder -} - -// Decode wraps the embedded decoder's Decode method to support -// protos using a jsonpb.Unmarshaler. -func (d DecoderWrapper) Decode(v interface{}) error { - return decodeJSONPb(d.Decoder, v) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". 
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder { - return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) }) -} - -func unmarshalJSONPb(data []byte, v interface{}) error { - d := json.NewDecoder(bytes.NewReader(data)) - return decodeJSONPb(d, v) -} - -func decodeJSONPb(d *json.Decoder, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - return decodeNonProtoField(d, v) - } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, p) -} - -func decodeNonProtoField(d *json.Decoder, v interface{}) error { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer", v) - } - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) - } - rv = rv.Elem() - } - if rv.Kind() == reflect.Map { - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - conv, ok := convFromType[rv.Type().Key().Kind()] - if !ok { - return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) - } - - m := make(map[string]*json.RawMessage) - if err := d.Decode(&m); err != nil { - return err - } - for k, v := range m { - result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - bk := result[0] - bv := reflect.New(rv.Type().Elem()) - if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { - return err - } - rv.SetMapIndex(bk, bv.Elem()) - } - return nil - } - if _, ok := rv.Interface().(protoEnum); ok { - var repr interface{} - if err := d.Decode(&repr); err != nil { - return err - } - switch repr.(type) { - case string: - // TODO(yugui) Should use proto.StructProperties? - return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) - case float64: - rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) - return nil - default: - return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) - } - } - return d.Decode(v) -} - -type protoEnum interface { - fmt.Stringer - EnumDescriptor() ([]byte, []int) -} - -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() - -// Delimiter for newline encoded JSON streams. -func (j *JSONPb) Delimiter() []byte { - return []byte("\n") -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go deleted file mode 100644 index f65d1a2676..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go +++ /dev/null @@ -1,62 +0,0 @@ -package runtime - -import ( - "io" - - "errors" - "github.com/golang/protobuf/proto" - "io/ioutil" -) - -// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes -type ProtoMarshaller struct{} - -// ContentType always returns "application/octet-stream". 
-func (*ProtoMarshaller) ContentType() string { - return "application/octet-stream" -} - -// Marshal marshals "value" into Proto -func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { - message, ok := value.(proto.Message) - if !ok { - return nil, errors.New("unable to marshal non proto field") - } - return proto.Marshal(message) -} - -// Unmarshal unmarshals proto "data" into "value" -func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { - message, ok := value.(proto.Message) - if !ok { - return errors.New("unable to unmarshal non proto field") - } - return proto.Unmarshal(data, message) -} - -// NewDecoder returns a Decoder which reads proto stream from "reader". -func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { - return DecoderFunc(func(value interface{}) error { - buffer, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - return marshaller.Unmarshal(buffer, value) - }) -} - -// NewEncoder returns an Encoder which writes proto stream into "writer". -func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { - return EncoderFunc(func(value interface{}) error { - buffer, err := marshaller.Marshal(value) - if err != nil { - return err - } - _, err = writer.Write(buffer) - if err != nil { - return err - } - - return nil - }) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go deleted file mode 100644 index 98fe6e88ac..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go +++ /dev/null @@ -1,48 +0,0 @@ -package runtime - -import ( - "io" -) - -// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. -type Marshaler interface { - // Marshal marshals "v" into byte sequence. - Marshal(v interface{}) ([]byte, error) - // Unmarshal unmarshals "data" into "v". - // "v" must be a pointer value. - Unmarshal(data []byte, v interface{}) error - // NewDecoder returns a Decoder which reads byte sequence from "r". - NewDecoder(r io.Reader) Decoder - // NewEncoder returns an Encoder which writes bytes sequence into "w". - NewEncoder(w io.Writer) Encoder - // ContentType returns the Content-Type which this marshaler is responsible for. - ContentType() string -} - -// Decoder decodes a byte sequence -type Decoder interface { - Decode(v interface{}) error -} - -// Encoder encodes gRPC payloads / fields into byte sequence. -type Encoder interface { - Encode(v interface{}) error -} - -// DecoderFunc adapts an decoder function into Decoder. -type DecoderFunc func(v interface{}) error - -// Decode delegates invocations to the underlying function itself. -func (f DecoderFunc) Decode(v interface{}) error { return f(v) } - -// EncoderFunc adapts an encoder function into Encoder -type EncoderFunc func(v interface{}) error - -// Encode delegates invocations to the underlying function itself. -func (f EncoderFunc) Encode(v interface{}) error { return f(v) } - -// Delimited defines the streaming delimiter. -type Delimited interface { - // Delimiter returns the record seperator for the stream. 
- Delimiter() []byte -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go deleted file mode 100644 index 5cc53ae4f6..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go +++ /dev/null @@ -1,91 +0,0 @@ -package runtime - -import ( - "errors" - "net/http" -) - -// MIMEWildcard is the fallback MIME type used for requests which do not match -// a registered MIME type. -const MIMEWildcard = "*" - -var ( - acceptHeader = http.CanonicalHeaderKey("Accept") - contentTypeHeader = http.CanonicalHeaderKey("Content-Type") - - defaultMarshaler = &JSONPb{OrigName: true} -) - -// MarshalerForRequest returns the inbound/outbound marshalers for this request. -// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. -// If it isn't set (or the request Content-Type is empty), checks for "*". -// If there are multiple Content-Type headers set, choose the first one that it can -// exactly match in the registry. -// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. -func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { - for _, acceptVal := range r.Header[acceptHeader] { - if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { - outbound = m - break - } - } - - for _, contentTypeVal := range r.Header[contentTypeHeader] { - if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { - inbound = m - break - } - } - - if inbound == nil { - inbound = mux.marshalers.mimeMap[MIMEWildcard] - } - if outbound == nil { - outbound = inbound - } - - return inbound, outbound -} - -// marshalerRegistry is a mapping from MIME types to Marshalers. -type marshalerRegistry struct { - mimeMap map[string]Marshaler -} - -// add adds a marshaler for a case-sensitive MIME type string ("*" to match any -// MIME type). -func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { - if len(mime) == 0 { - return errors.New("empty MIME type") - } - - m.mimeMap[mime] = marshaler - - return nil -} - -// makeMarshalerMIMERegistry returns a new registry of marshalers. -// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. -// -// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. -// "*" can be used to match any Content-Type. -// This can be attached to a ServerMux with the marshaler option. -func makeMarshalerMIMERegistry() marshalerRegistry { - return marshalerRegistry{ - mimeMap: map[string]Marshaler{ - MIMEWildcard: defaultMarshaler, - }, - } -} - -// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound -// Marshalers to a MIME type in mux. 
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { - return func(mux *ServeMux) { - if err := mux.marshalers.add(mime, marshaler); err != nil { - panic(err) - } - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go deleted file mode 100644 index ec81e55b5e..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ /dev/null @@ -1,268 +0,0 @@ -package runtime - -import ( - "context" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// A HandlerFunc handles a specific pair of path pattern and HTTP method. -type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) - -// ServeMux is a request multiplexer for grpc-gateway. -// It matches http requests to patterns and invokes the corresponding handler. -type ServeMux struct { - // handlers maps HTTP method to a list of handlers. - handlers map[string][]handler - forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error - marshalers marshalerRegistry - incomingHeaderMatcher HeaderMatcherFunc - outgoingHeaderMatcher HeaderMatcherFunc - metadataAnnotators []func(context.Context, *http.Request) metadata.MD - protoErrorHandler ProtoErrorHandlerFunc - disablePathLengthFallback bool -} - -// ServeMuxOption is an option that can be given to a ServeMux on construction. -type ServeMuxOption func(*ServeMux) - -// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. -// -// forwardResponseOption is an option that will be called on the relevant context.Context, -// http.ResponseWriter, and proto.Message before every forwarded response. -// -// The message may be nil in the case where just a header is being sent. -func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) - } -} - -// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. -type HeaderMatcherFunc func(string) (string, bool) - -// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header -// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with -// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. -func DefaultHeaderMatcher(key string) (string, bool) { - key = textproto.CanonicalMIMEHeaderKey(key) - if isPermanentHTTPHeader(key) { - return MetadataPrefix + key, true - } else if strings.HasPrefix(key, MetadataHeaderPrefix) { - return key[len(MetadataHeaderPrefix):], true - } - return "", false -} - -// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. -// -// This matcher will be called with each header in http.Request. If matcher returns true, that header will be -// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. 
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.incomingHeaderMatcher = fn - } -} - -// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. -// -// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be -// passed to http response returned from gateway. To transform the header before passing to response, -// matcher should return modified header. -func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.outgoingHeaderMatcher = fn - } -} - -// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used by services that need to read from http.Request and modify gRPC context. A common use case -// is reading token from cookie and adding it in gRPC context. -func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) - } -} - -// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used to handle an error as general proto message defined by gRPC. -// The response including body and status is not backward compatible with the default error handler. -// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. -func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.protoErrorHandler = fn - } -} - -// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. -func WithDisablePathLengthFallback() ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.disablePathLengthFallback = true - } -} - -// NewServeMux returns a new ServeMux whose internal mapping is empty. -func NewServeMux(opts ...ServeMuxOption) *ServeMux { - serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - } - - for _, opt := range opts { - opt(serveMux) - } - - if serveMux.protoErrorHandler != nil { - HTTPError = serveMux.protoErrorHandler - // OtherErrorHandler is no longer used when protoErrorHandler is set. - // Overwritten by a special error handler to return Unknown. - OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { - ctx := context.Background() - _, outboundMarshaler := MarshalerForRequest(serveMux, r) - sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") - serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) - } - } - - if serveMux.incomingHeaderMatcher == nil { - serveMux.incomingHeaderMatcher = DefaultHeaderMatcher - } - - if serveMux.outgoingHeaderMatcher == nil { - serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { - return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true - } - } - - return serveMux -} - -// Handle associates "h" to the pair of HTTP method and path pattern. -func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { - s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) -} - -// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - path := r.URL.Path - if !strings.HasPrefix(path, "/") { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - } - return - } - - components := strings.Split(path[1:], "/") - l := len(components) - var verb string - if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } - return - } else if idx > 0 { - c := components[l-1] - components[l-1], verb = c[:idx], c[idx+1:] - } - - if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - } - for _, h := range s.handlers[r.Method] { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - h.h(w, r, pathParams) - return - } - - // lookup other methods to handle fallback from GET to POST and - // to determine if it is MethodNotAllowed or NotFound. - for m, handlers := range s.handlers { - if m == r.Method { - continue - } - for _, h := range handlers { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - // X-HTTP-Method-Override is optional. Always allow fallback to POST. - if s.isPathLengthFallback(r) { - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - h.h(w, r, pathParams) - return - } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - } - return - } - } - - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } -} - -// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
-func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { - return s.forwardResponseOptions -} - -func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { - return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" -} - -type handler struct { - pat Pattern - h HandlerFunc -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go deleted file mode 100644 index f16a84ad38..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ /dev/null @@ -1,227 +0,0 @@ -package runtime - -import ( - "errors" - "fmt" - "strings" - - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -var ( - // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. - ErrNotMatch = errors.New("not match to the path pattern") - // ErrInvalidPattern indicates that the given definition of Pattern is not valid. - ErrInvalidPattern = errors.New("invalid pattern") -) - -type op struct { - code utilities.OpCode - operand int -} - -// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. -type Pattern struct { - // ops is a list of operations - ops []op - // pool is a constant pool indexed by the operands or vars. - pool []string - // vars is a list of variables names to be bound by this pattern - vars []string - // stacksize is the max depth of the stack - stacksize int - // tailLen is the length of the fixed-size segments after a deep wildcard - tailLen int - // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. - verb string -} - -// NewPattern returns a new Pattern from the given definition values. -// "ops" is a sequence of op codes. "pool" is a constant pool. -// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. -// "version" must be 1 for now. -// It returns an error if the given definition is invalid. 
-func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { - if version != 1 { - grpclog.Infof("unsupported version: %d", version) - return Pattern{}, ErrInvalidPattern - } - - l := len(ops) - if l%2 != 0 { - grpclog.Infof("odd number of ops codes: %d", l) - return Pattern{}, ErrInvalidPattern - } - - var ( - typedOps []op - stack, maxstack int - tailLen int - pushMSeen bool - vars []string - ) - for i := 0; i < l; i += 2 { - op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpPushM: - if pushMSeen { - grpclog.Infof("pushM appears twice") - return Pattern{}, ErrInvalidPattern - } - pushMSeen = true - stack++ - case utilities.OpLitPush: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("negative literal index: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpConcatN: - if op.operand <= 0 { - grpclog.Infof("negative concat size: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - stack -= op.operand - if stack < 0 { - grpclog.Print("stack underflow") - return Pattern{}, ErrInvalidPattern - } - stack++ - case utilities.OpCapture: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("variable name index out of bound: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - v := pool[op.operand] - op.operand = len(vars) - vars = append(vars, v) - stack-- - if stack < 0 { - grpclog.Infof("stack underflow") - return Pattern{}, ErrInvalidPattern - } - default: - grpclog.Infof("invalid opcode: %d", op.code) - return Pattern{}, ErrInvalidPattern - } - - if maxstack < stack { - maxstack = stack - } - typedOps = append(typedOps, op) - } - return Pattern{ - ops: typedOps, - pool: pool, - vars: vars, - stacksize: maxstack, - tailLen: tailLen, - verb: verb, - }, nil -} - -// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. -func MustPattern(p Pattern, err error) Pattern { - if err != nil { - grpclog.Fatalf("Pattern initialization failed: %v", err) - } - return p -} - -// Match examines components if it matches to the Pattern. -// If it matches, the function returns a mapping from field paths to their captured values. -// If otherwise, the function returns an error. 
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) { - if p.verb != verb { - return nil, ErrNotMatch - } - - var pos int - stack := make([]string, 0, p.stacksize) - captured := make([]string, len(p.vars)) - l := len(components) - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush, utilities.OpLitPush: - if pos >= l { - return nil, ErrNotMatch - } - c := components[pos] - if op.code == utilities.OpLitPush { - if lit := p.pool[op.operand]; c != lit { - return nil, ErrNotMatch - } - } - stack = append(stack, c) - pos++ - case utilities.OpPushM: - end := len(components) - if end < pos+p.tailLen { - return nil, ErrNotMatch - } - end -= p.tailLen - stack = append(stack, strings.Join(components[pos:end], "/")) - pos = end - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - captured[op.operand] = stack[n] - stack = stack[:n] - } - } - if pos < l { - return nil, ErrNotMatch - } - bindings := make(map[string]string) - for i, val := range captured { - bindings[p.vars[i]] = val - } - return bindings, nil -} - -// Verb returns the verb part of the Pattern. -func (p Pattern) Verb() string { return p.verb } - -func (p Pattern) String() string { - var stack []string - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - stack = append(stack, "*") - case utilities.OpLitPush: - stack = append(stack, p.pool[op.operand]) - case utilities.OpPushM: - stack = append(stack, "**") - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) - } - } - segs := strings.Join(stack, "/") - if p.verb != "" { - return fmt.Sprintf("/%s:%s", segs, p.verb) - } - return "/" + segs -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go deleted file mode 100644 index a3151e2a55..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go +++ /dev/null @@ -1,80 +0,0 @@ -package runtime - -import ( - "github.com/golang/protobuf/proto" -) - -// StringP returns a pointer to a string whose pointee is same as the given string value. -func StringP(val string) (*string, error) { - return proto.String(val), nil -} - -// BoolP parses the given string representation of a boolean value, -// and returns a pointer to a bool whose value is same as the parsed value. -func BoolP(val string) (*bool, error) { - b, err := Bool(val) - if err != nil { - return nil, err - } - return proto.Bool(b), nil -} - -// Float64P parses the given string representation of a floating point number, -// and returns a pointer to a float64 whose value is same as the parsed number. -func Float64P(val string) (*float64, error) { - f, err := Float64(val) - if err != nil { - return nil, err - } - return proto.Float64(f), nil -} - -// Float32P parses the given string representation of a floating point number, -// and returns a pointer to a float32 whose value is same as the parsed number. 
-func Float32P(val string) (*float32, error) { - f, err := Float32(val) - if err != nil { - return nil, err - } - return proto.Float32(f), nil -} - -// Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. -func Int64P(val string) (*int64, error) { - i, err := Int64(val) - if err != nil { - return nil, err - } - return proto.Int64(i), nil -} - -// Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. -func Int32P(val string) (*int32, error) { - i, err := Int32(val) - if err != nil { - return nil, err - } - return proto.Int32(i), err -} - -// Uint64P parses the given string representation of an integer -// and returns a pointer to a uint64 whose value is same as the parsed integer. -func Uint64P(val string) (*uint64, error) { - i, err := Uint64(val) - if err != nil { - return nil, err - } - return proto.Uint64(i), err -} - -// Uint32P parses the given string representation of an integer -// and returns a pointer to a uint32 whose value is same as the parsed integer. -func Uint32P(val string) (*uint32, error) { - i, err := Uint32(val) - if err != nil { - return nil, err - } - return proto.Uint32(i), err -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go deleted file mode 100644 index b7fa32e45d..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go +++ /dev/null @@ -1,70 +0,0 @@ -package runtime - -import ( - "io" - "net/http" - - "context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. -type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) - -var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler - -// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a Status message marshaled by a Marshaler. -// -// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - // return Internal when Marshal failed - const fallback = `{"code": 13, "message": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - pb := s.Proto() - contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - buf, merr := marshaler.Marshal(s.Proto()) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go deleted file mode 100644 index bb9359f17c..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ /dev/null @@ -1,392 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -// PopulateQueryParameters populates "values" into "msg". -// A value is ignored if its key starts with one of the elements in "filter". -func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - for key, values := range values { - re, err := regexp.Compile("^(.*)\\[(.*)\\]$") - if err != nil { - return err - } - match := re.FindStringSubmatch(key) - if len(match) == 3 { - key = match[1] - values = append([]string{match[2]}, values...) - } - fieldPath := strings.Split(key, ".") - if filter.HasCommonPrefix(fieldPath) { - continue - } - if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { - return err - } - } - return nil -} - -// PopulateFieldFromPath sets a value in a nested Protobuf structure. -// It instantiates missing protobuf fields as it goes. 
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { - fieldPath := strings.Split(fieldPathString, ".") - return populateFieldValueFromPath(msg, fieldPath, []string{value}) -} - -func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { - m := reflect.ValueOf(msg) - if m.Kind() != reflect.Ptr { - return fmt.Errorf("unexpected type %T: %v", msg, msg) - } - var props *proto.Properties - m = m.Elem() - for i, fieldName := range fieldPath { - isLast := i == len(fieldPath)-1 - if !isLast && m.Kind() != reflect.Struct { - return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) - } - var f reflect.Value - var err error - f, props, err = fieldByProtoName(m, fieldName) - if err != nil { - return err - } else if !f.IsValid() { - grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) - return nil - } - - switch f.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - m = f - case reflect.Slice: - if !isLast { - return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) - } - // Handle []byte - if f.Type().Elem().Kind() == reflect.Uint8 { - m = f - break - } - return populateRepeatedField(f, values, props) - case reflect.Ptr: - if f.IsNil() { - m = reflect.New(f.Type().Elem()) - f.Set(m.Convert(f.Type())) - } - m = f.Elem() - continue - case reflect.Struct: - m = f - continue - case reflect.Map: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - return populateMapField(f, values, props) - default: - return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) - } - } - switch len(values) { - case 0: - return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) - case 1: - default: - grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) - } - return populateField(m, values[0], props) -} - -// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". -// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { - props := proto.GetProperties(m.Type()) - - // look up field name in oneof map - if op, ok := props.OneofTypes[name]; ok { - v := reflect.New(op.Type.Elem()) - field := m.Field(op.Field) - if !field.IsNil() { - return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) - } - field.Set(v) - return v.Elem().Field(0), op.Prop, nil - } - - for _, p := range props.Prop { - if p.OrigName == name { - return m.FieldByName(p.Name), p, nil - } - if p.JSONName == name { - return m.FieldByName(p.Name), p, nil - } - } - return reflect.Value{}, nil, nil -} - -func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { - if len(values) != 2 { - return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) - } - - key, value := values[0], values[1] - keyType := f.Type().Key() - valueType := f.Type().Elem() - if f.IsNil() { - f.Set(reflect.MakeMap(f.Type())) - } - - keyConv, ok := convFromType[keyType.Kind()] - if !ok { - return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) - } - valueConv, ok := convFromType[valueType.Kind()] - if !ok { - return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) - } - - keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) - if err := keyV[1].Interface(); err != nil { - return err.(error) - } - valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := valueV[1].Interface(); err != nil { - return err.(error) - } - - f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) - - return nil -} - -func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { - elemType := f.Type().Elem() - - // is the destination field a slice of an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnumRepeated(f, values, enumValMap) - } - - conv, ok := convFromType[elemType.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %s", elemType) - } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Index(i).Set(result[0].Convert(f.Index(i).Type())) - } - return nil -} - -func populateField(f reflect.Value, value string, props *proto.Properties) error { - i := f.Addr().Interface() - - // Handle protobuf well known types - type wkt interface { - XXX_WellKnownType() string - } - if wkt, ok := i.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "Timestamp": - if value == "null" { - f.Field(0).SetInt(0) - f.Field(1).SetInt(0) - return nil - } - - t, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - f.Field(0).SetInt(int64(t.Unix())) - f.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Duration": - if value == "null" { - f.Field(0).SetInt(0) - f.Field(1).SetInt(0) - return nil - } - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - f.Field(0).SetInt(s) - f.Field(1).SetInt(ns) - return nil - case "DoubleValue": - fallthrough - case "FloatValue": - float64Val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetFloat(float64Val) - return nil - case "Int64Value": - fallthrough - case "Int32Value": - int64Val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetInt(int64Val) - return nil - case "UInt64Value": - fallthrough - case "UInt32Value": - uint64Val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetUint(uint64Val) - return nil - case "BoolValue": - if value == "true" { - f.Field(0).SetBool(true) - } else if value == "false" { - f.Field(0).SetBool(false) - } else { - return fmt.Errorf("bad BoolValue: %s", value) - } - return nil - case "StringValue": - f.Field(0).SetString(value) - return nil - case "BytesValue": - bytesVal, err := base64.StdEncoding.DecodeString(value) - if err != nil { - return fmt.Errorf("bad BytesValue: %s", value) - } - f.Field(0).SetBytes(bytesVal) - return nil - } - } - - // Handle google well known types - if gwkt, ok := i.(proto.Message); ok { - switch proto.MessageName(gwkt) { - case "google.protobuf.FieldMask": - p := f.Field(0) - for _, v := range strings.Split(value, ",") { - if v != "" { - p.Set(reflect.Append(p, reflect.ValueOf(v))) - } - } - return nil - } - } - - // Handle Time and Duration stdlib types - switch t := i.(type) { - case *time.Time: - pt, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - *t = pt - return nil - case *time.Duration: - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - *t = d - return nil - } - - // is the destination field an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnum(f, value, enumValMap) - } - - conv, ok := convFromType[f.Kind()] - if !ok { - return fmt.Errorf("field type %T is not supported in query parameters", i) - } - result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Set(result[0].Convert(f.Type())) - return nil -} - -func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { - // see if it's an enumeration string - if enumVal, ok := enumValMap[value]; ok { - return reflect.ValueOf(enumVal).Convert(t), nil - } - - // check for an integer that matches an enumeration value - eVal, err := strconv.Atoi(value) - if err != nil { - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) - } - for _, v := range enumValMap { - if v == int32(eVal) { - return reflect.ValueOf(eVal).Convert(t), nil - } - } - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) -} - -func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { - cval, err := convertEnum(value, f.Type(), enumValMap) - if err != nil { - return err - } - f.Set(cval) - return nil -} - -func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { - elemType := f.Type().Elem() - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result, err := convertEnum(v, elemType, enumValMap) - if err != nil { - return err - } - f.Index(i).Set(result) - } - return nil -} - -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(String), - reflect.Bool: reflect.ValueOf(Bool), - reflect.Float64: reflect.ValueOf(Float64), - reflect.Float32: reflect.ValueOf(Float32), - reflect.Int64: reflect.ValueOf(Int64), - reflect.Int32: reflect.ValueOf(Int32), - reflect.Uint64: reflect.ValueOf(Uint64), - reflect.Uint32: reflect.ValueOf(Uint32), - reflect.Slice: reflect.ValueOf(Bytes), - } -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel deleted file mode 100644 index 7109d79323..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pattern.go", - "readerfactory.go", - "trie.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["trie_test.go"], - embed = [":go_default_library"], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go deleted file mode 100644 index cf79a4d588..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package utilities provides members for internal use in grpc-gateway. 
-package utilities diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go deleted file mode 100644 index dfe7de4864..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go +++ /dev/null @@ -1,22 +0,0 @@ -package utilities - -// An OpCode is a opcode of compiled path patterns. -type OpCode int - -// These constants are the valid values of OpCode. -const ( - // OpNop does nothing - OpNop = OpCode(iota) - // OpPush pushes a component to stack - OpPush - // OpLitPush pushes a component to stack if it matches to the literal - OpLitPush - // OpPushM concatenates the remaining components and pushes it to stack - OpPushM - // OpConcatN pops N items from stack, concatenates them and pushes it back to stack - OpConcatN - // OpCapture pops an item and binds it to the variable - OpCapture - // OpEnd is the least positive invalid opcode. - OpEnd -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go deleted file mode 100644 index 6dd3854665..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go +++ /dev/null @@ -1,20 +0,0 @@ -package utilities - -import ( - "bytes" - "io" - "io/ioutil" -) - -// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins -// at the start of the stream -func IOReaderFactory(r io.Reader) (func() io.Reader, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - return func() io.Reader { - return bytes.NewReader(b) - }, nil -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go deleted file mode 100644 index c2b7b30dd9..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go +++ /dev/null @@ -1,177 +0,0 @@ -package utilities - -import ( - "sort" -) - -// DoubleArray is a Double Array implementation of trie on sequences of strings. -type DoubleArray struct { - // Encoding keeps an encoding from string to int - Encoding map[string]int - // Base is the base array of Double Array - Base []int - // Check is the check array of Double Array - Check []int -} - -// NewDoubleArray builds a DoubleArray from a set of sequences of strings. 
-func NewDoubleArray(seqs [][]string) *DoubleArray { - da := &DoubleArray{Encoding: make(map[string]int)} - if len(seqs) == 0 { - return da - } - - encoded := registerTokens(da, seqs) - sort.Sort(byLex(encoded)) - - root := node{row: -1, col: -1, left: 0, right: len(encoded)} - addSeqs(da, encoded, 0, root) - - for i := len(da.Base); i > 0; i-- { - if da.Check[i-1] != 0 { - da.Base = da.Base[:i] - da.Check = da.Check[:i] - break - } - } - return da -} - -func registerTokens(da *DoubleArray, seqs [][]string) [][]int { - var result [][]int - for _, seq := range seqs { - var encoded []int - for _, token := range seq { - if _, ok := da.Encoding[token]; !ok { - da.Encoding[token] = len(da.Encoding) - } - encoded = append(encoded, da.Encoding[token]) - } - result = append(result, encoded) - } - for i := range result { - result[i] = append(result[i], len(da.Encoding)) - } - return result -} - -type node struct { - row, col int - left, right int -} - -func (n node) value(seqs [][]int) int { - return seqs[n.row][n.col] -} - -func (n node) children(seqs [][]int) []*node { - var result []*node - lastVal := int(-1) - last := new(node) - for i := n.left; i < n.right; i++ { - if lastVal == seqs[i][n.col+1] { - continue - } - last.right = i - last = &node{ - row: i, - col: n.col + 1, - left: i, - } - result = append(result, last) - } - last.right = n.right - return result -} - -func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { - ensureSize(da, pos) - - children := n.children(seqs) - var i int - for i = 1; ; i++ { - ok := func() bool { - for _, child := range children { - code := child.value(seqs) - j := i + code - ensureSize(da, j) - if da.Check[j] != 0 { - return false - } - } - return true - }() - if ok { - break - } - } - da.Base[pos] = i - for _, child := range children { - code := child.value(seqs) - j := i + code - da.Check[j] = pos + 1 - } - terminator := len(da.Encoding) - for _, child := range children { - code := child.value(seqs) - if code == terminator { - continue - } - j := i + code - addSeqs(da, seqs, j, *child) - } -} - -func ensureSize(da *DoubleArray, i int) { - for i >= len(da.Base) { - da.Base = append(da.Base, make([]int, len(da.Base)+1)...) - da.Check = append(da.Check, make([]int, len(da.Check)+1)...) - } -} - -type byLex [][]int - -func (l byLex) Len() int { return len(l) } -func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l byLex) Less(i, j int) bool { - si := l[i] - sj := l[j] - var k int - for k = 0; k < len(si) && k < len(sj); k++ { - if si[k] < sj[k] { - return true - } - if si[k] > sj[k] { - return false - } - } - if k < len(sj) { - return true - } - return false -} - -// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
-func (da *DoubleArray) HasCommonPrefix(seq []string) bool { - if len(da.Base) == 0 { - return false - } - - var i int - for _, t := range seq { - code, ok := da.Encoding[t] - if !ok { - break - } - j := da.Base[i] + code - if len(da.Check) <= j || da.Check[j] != i+1 { - break - } - i = j - } - j := da.Base[i] + len(da.Encoding) - if len(da.Check) <= j || da.Check[j] != i+1 { - return false - } - return true -} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/.gitignore b/vendor/github.com/hashicorp/go-kms-wrapping/.gitignore new file mode 100644 index 0000000000..722d5e71d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/.gitignore @@ -0,0 +1 @@ +.vscode diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/LICENSE b/vendor/github.com/hashicorp/go-kms-wrapping/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. 
"You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. 
You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/Makefile b/vendor/github.com/hashicorp/go-kms-wrapping/Makefile new file mode 100644 index 0000000000..353e5fd7ff --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/Makefile @@ -0,0 +1,9 @@ +# Determine this makefile's path. +# Be sure to place this BEFORE `include` directives, if any. +THIS_FILE := $(lastword $(MAKEFILE_LIST)) + +proto: + protoc types.proto --go_out=paths=source_relative:. 
+ sed -i -e 's/Iv/IV/' -e 's/Hmac/HMAC/' types.pb.go + +.PHONY: proto diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/README.md b/vendor/github.com/hashicorp/go-kms-wrapping/README.md new file mode 100644 index 0000000000..4dc61de94b --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/README.md @@ -0,0 +1,106 @@ +# Go-KMS-Wrapping - Go library for encrypting values through various KMS providers + +[![Godoc](https://godoc.org/github.com/hashicorp/go-kms-wrapping?status.svg)](https://godoc.org/github.com/hashicorp/go-kms-wrapping) + +*NOTE*: Currently no compatibility guarantees are provided for this library; we +expect tags to remain in the `0.x.y` range. + +Go-KMS-Wrapping is a library that can be used to encrypt things through various +KMS providers -- public clouds, Vault's Transit plugin, etc. It is similar in +concept to various other cryptosystems (like NaCl) but focuses on using third +party KMSes. This library is the underpinning of Vault's auto-unseal +functionality, and should be ready to use for many other applications. + +For KMS providers that do not support encrypting arbitrarily large values, the +library will generate an envelope data encryption key (DEK), encrypt the value +with it using an authenticated cipher, and use the KMS to encrypt the DEK. + +The key being used by a given implementation can change; the library stores +information about which key was actually used to encrypt a given value as part +of the returned data, and this key will be used for decryption. By extension, +this means that users should be careful not to delete keys in KMS systems +simply because they're not configured to be used by this library _currently_, +as they may have been used for past encryption operations. + + + + + +- [Features](#features) +- [Installation](#installation) +- [Overview](#overview) +- [Usage](#usage) + + + +## Features + + * Supports many KMSes: + * * AEAD using AES-GCM and a provided key + * * Alibaba Cloud KMS (uses envelopes) + * * AWS KMS (uses envelopes) + * * GCP CKMS (uses envelopes) + * * Azure KeyVault (uses envelopes) + * * OCI KMS (uses envelopes) + * * Vault Transit mount + * Transparently supports multiple decryption targets, allowing for key rotation + * Supports Additional Authenticated Data (AAD) for all KMSes except Vault Transit. + +## Installation + +Import like any other library; supports go modules. It has not been tested with +non-`go mod` vendoring tools. + +## Overview + +The library exports a `Wrapper` interface that is implemented by multiple +providers. Each of these providers may have some functions specific to them, +usually to pass configuration information. A normal workflow is to create the +provider directly, pass it any needed configuration via the provider-specific +methods, and then have the rest of your code use the `Wrapper` interface. + +Some of the functions make use of option structs that are currently empty. This +is to allow options to be added later without breaking backwards compatibility. + +The best place to find the currently available set of configuration options +supported by each provider is its code, but it can also be found in [Vault's +seal configuration +documentation](https://www.vaultproject.io/docs/configuration/seal/index.html). +All environment variables noted there also work in this library, however, +non-Vault-specific variants of the environment variables are also available for +each provider. See the code/comments in each given provider for the currently +allowed env vars. 
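The DEK-based envelope flow described in this README is implemented by the `Envelope` type vendored later in this diff (see `envelope.go`). Below is a minimal, illustrative sketch of driving that type directly, based only on the signatures shown in `envelope.go`; the `main` scaffolding and the step of wrapping `info.Key` with an actual KMS are assumptions for illustration, not part of the upstream library documentation.

```go
package main

import (
	"fmt"
	"log"

	wrapping "github.com/hashicorp/go-kms-wrapping"
)

func main() {
	// NewEnvelope accepts an options struct; nil is valid (see envelope.go in this change).
	env := wrapping.NewEnvelope(nil)

	// Encrypt generates a random 32-byte DEK and 12-byte IV, then AES-GCM
	// encrypts the plaintext. info.Key is the plaintext DEK; a real Wrapper
	// implementation would next encrypt info.Key with the KMS and store that
	// result alongside info.Ciphertext and info.IV.
	info, err := env.Encrypt([]byte("sensitive value"), nil)
	if err != nil {
		log.Fatal(err)
	}

	// Decrypt reverses the operation given the same EnvelopeInfo and AAD (nil here).
	plaintext, err := env.Decrypt(info, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plaintext))
}
```

In normal use the concrete wrappers (AWS KMS, GCP CKMS, and so on) perform this DEK handling internally, as the Usage section below shows.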
+ +## Usage + +Following is an example usage of the AWS KMS provider. + +```go +// Context used in this library is passed to various underlying provider +// libraries; how it's used is dependent on the provider libraries +ctx := context.Background() + +wrapper := awskms.NewWrapper(nil) +_, err := wrapper.SetConfig(&map[string]string{ + "kms_key_id": "1234abcd-12ab-34cd-56ef-1234567890ab", +}) +if err != nil { + return err +} +blobInfo, err := wrapper.Encrypt(ctx, []byte("foo"), nil) +if err != nil { + return err +} + +// +// Do some things... +// + +plaintext, err := wrapper.Decrypt(ctx, blobInfo) +if err != nil { + return err +} +if string(plaintext) != "foo" { + return errors.New("mismatch between input and output") +} +``` diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/entropy/entropy.go b/vendor/github.com/hashicorp/go-kms-wrapping/entropy/entropy.go similarity index 85% rename from vendor/github.com/hashicorp/vault/sdk/helper/entropy/entropy.go rename to vendor/github.com/hashicorp/go-kms-wrapping/entropy/entropy.go index afaef76edb..caf4977942 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/entropy/entropy.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/entropy/entropy.go @@ -2,8 +2,6 @@ package entropy import ( "fmt" - - "github.com/hashicorp/errwrap" ) type Sourcer interface { @@ -28,7 +26,7 @@ func (r *Reader) Read(p []byte) (n int, err error) { delivered := copy(p, randBytes) if delivered != requested { if err != nil { - return delivered, errwrap.Wrapf("unable to fill provided buffer with entropy: {{err}}", err) + return delivered, fmt.Errorf("unable to fill provided buffer with entropy: %w", err) } return delivered, fmt.Errorf("unable to fill provided buffer with entropy") } diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/entropy/testhelper.go b/vendor/github.com/hashicorp/go-kms-wrapping/entropy/testhelper.go new file mode 100644 index 0000000000..97f45f7d2e --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/entropy/testhelper.go @@ -0,0 +1,13 @@ +package entropy + +type mockSourcer struct{} + +// simulates a successful sourcer +func (m *mockSourcer) GetRandom(bytes int) ([]byte, error) { + return make([]byte, bytes), nil +} + +// provide a mock entropy.Reader +func NewMockRandomReader() *Reader { + return &Reader{new(mockSourcer)} +} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/envelope.go b/vendor/github.com/hashicorp/go-kms-wrapping/envelope.go new file mode 100644 index 0000000000..0b62f976f8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/envelope.go @@ -0,0 +1,76 @@ +package wrapping + +import ( + "crypto/aes" + "crypto/cipher" + "errors" + fmt "fmt" + + uuid "github.com/hashicorp/go-uuid" +) + +// Envelope performs encryption or decryption, wrapping sensitive data. It creates a random key. This is usable on its own but since many KMS systems or key types cannot support large values, this is used by implementations in this package to encrypt large values with a DEK and use the actual KMS to encrypt the DEK. +type Envelope struct{} + +// EnvelopeOptions is a placeholder for future options, such as the ability to switch which algorithm is used +type EnvelopeOptions struct{} + +// EnvelopeInfo contains the information necessary to perform encryption or decryption in an envelope fashion +type EnvelopeInfo struct { + Ciphertext []byte + Key []byte + IV []byte +} + +// NewEnvelope returns an Envelope that is ready for use. It is valid to pass nil EnvelopeOptions. 
+func NewEnvelope(opts *EnvelopeOptions) *Envelope { + return &Envelope{} +} + +// Encrypt takes in plaintext and envelope encrypts it, generating an EnvelopeInfo value +func (e *Envelope) Encrypt(plaintext []byte, aad []byte) (*EnvelopeInfo, error) { + // Generate DEK + key, err := uuid.GenerateRandomBytes(32) + if err != nil { + return nil, err + } + iv, err := uuid.GenerateRandomBytes(12) + if err != nil { + return nil, err + } + aead, err := e.aeadEncrypter(key) + if err != nil { + return nil, err + } + + return &EnvelopeInfo{ + Ciphertext: aead.Seal(nil, iv, plaintext, aad), + Key: key, + IV: iv, + }, nil +} + +// Decrypt takes in EnvelopeInfo and potentially additional data and decrypts. Additional data is separate from the encrypted blob info as it is expected that will be sourced from a separate location. +func (e *Envelope) Decrypt(data *EnvelopeInfo, aad []byte) ([]byte, error) { + aead, err := e.aeadEncrypter(data.Key) + if err != nil { + return nil, err + } + + return aead.Open(nil, data.IV, data.Ciphertext, aad) +} + +func (e *Envelope) aeadEncrypter(key []byte) (cipher.AEAD, error) { + aesCipher, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf("failed to create cipher: %w", err) + } + + // Create the GCM mode AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil { + return nil, errors.New("failed to initialize GCM mode") + } + + return gcm, nil +} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/go.mod b/vendor/github.com/hashicorp/go-kms-wrapping/go.mod new file mode 100644 index 0000000000..ad0f31fa28 --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/go.mod @@ -0,0 +1,23 @@ +module github.com/hashicorp/go-kms-wrapping + +go 1.13 + +require ( + cloud.google.com/go v0.39.0 + github.com/Azure/azure-sdk-for-go v36.2.0+incompatible + github.com/Azure/go-autorest/autorest v0.9.2 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 + github.com/Azure/go-autorest/autorest/to v0.3.0 + github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f + github.com/aws/aws-sdk-go v1.25.37 + github.com/golang/protobuf v1.3.2 + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-hclog v0.10.1 + github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 + github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78 + github.com/hashicorp/vault/sdk v0.1.14-0.20191229212425-c478d00be0d6 + github.com/oracle/oci-go-sdk v12.5.0+incompatible + golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 + google.golang.org/api v0.14.0 + google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 +) diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/go.sum b/vendor/github.com/hashicorp/go-kms-wrapping/go.sum new file mode 100644 index 0000000000..236662da4e --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/go.sum @@ -0,0 +1,289 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0 h1:UgQP9na6OTfp4dsAiz/eFpFA1C6tPdH5wiRdi19tuMw= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 
h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.37 h1:gBtB/F3dophWpsUQKN/Kni+JzYEH2mGHF4hWNtfED1w= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp 
v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.10.0 h1:b86HUuA126IcSHyC55WjPo7KtCOVeTCKIjr+3lBhPxI= +github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.10.1 h1:uyt/l0dWjJ879yiAu+T7FG3/6QX+zwm4bQ8P7XsYt3o= +github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping v0.0.0-20191129225826-634facde9f88/go.mod h1:Pm+Umb/6Gij6ZG534L7QDyvkauaOQWGb+arj9aFjCE0= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
+github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 h1:PKbxRbsOP7R3f/TpdqcgXrO69T3yd9nLoR+RMRUxSxA= +github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78 h1:eCE3FElynU2MAuO3QIfpA969dDdVgRFE+2oeeNN2nGc= +github.com/hashicorp/vault/api v1.0.5-0.20191122173911-80fcc7907c78/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044 h1:bXjbz4PFfOoMUrqe9upVa0SbJ2RqfbLzh4eprst/b40= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= +github.com/hashicorp/vault/sdk v0.1.14-0.20191229212425-c478d00be0d6 h1:tsN1O2BD8Nw0135xV+kQch5wEPC0ESBsrHRSxJC1cBQ= +github.com/hashicorp/vault/sdk v0.1.14-0.20191229212425-c478d00be0d6/go.mod h1:EhK3a4sYnUbANAWxDP4LHf1GvP8DCtISGemfbEGbeo8= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty 
v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oracle/oci-go-sdk v12.5.0+incompatible h1:pr08ECoaDKHWO9tnzJB1YqClEs7ZK1CFOez2DQocH14= +github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= 
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/internal/xor/xor.go 
b/vendor/github.com/hashicorp/go-kms-wrapping/internal/xor/xor.go new file mode 100644 index 0000000000..51b7e82ef5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/internal/xor/xor.go @@ -0,0 +1,46 @@ +package xor + +import ( + "encoding/base64" + "fmt" +) + +// XORBytes takes two byte slices and XORs them together, returning the final +// byte slice. It is an error to pass in two byte slices that do not have the +// same length. +func XORBytes(a, b []byte) ([]byte, error) { + if len(a) != len(b) { + return nil, fmt.Errorf("length of byte slices is not equivalent: %d != %d", len(a), len(b)) + } + + buf := make([]byte, len(a)) + + for i, _ := range a { + buf[i] = a[i] ^ b[i] + } + + return buf, nil +} + +// XORBase64 takes two base64-encoded strings and XORs the decoded byte slices +// together, returning the final byte slice. It is an error to pass in two +// strings that do not have the same length to their base64-decoded byte slice. +func XORBase64(a, b string) ([]byte, error) { + aBytes, err := base64.StdEncoding.DecodeString(a) + if err != nil { + return nil, fmt.Errorf("error decoding first base64 value: %w", err) + } + if aBytes == nil || len(aBytes) == 0 { + return nil, fmt.Errorf("decoded first base64 value is nil or empty") + } + + bBytes, err := base64.StdEncoding.DecodeString(b) + if err != nil { + return nil, fmt.Errorf("error decoding second base64 value: %w", err) + } + if bBytes == nil || len(bBytes) == 0 { + return nil, fmt.Errorf("decoded second base64 value is nil or empty") + } + + return XORBytes(aBytes, bBytes) +} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/types.pb.go b/vendor/github.com/hashicorp/go-kms-wrapping/types.pb.go new file mode 100644 index 0000000000..e250f351c2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/types.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: types.proto + +package wrapping + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// EncryptedBlobInfo contains information about the encrypted value along with +// information about the key used to encrypt it +type EncryptedBlobInfo struct { + // Ciphertext is the encrypted bytes + Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // IV is the initialization value used during encryption + IV []byte `protobuf:"bytes,2,opt,name=iv,proto3" json:"iv,omitempty"` + // HMAC is the bytes of the HMAC, if any + HMAC []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"` + // Wrapped can be used by the client to indicate whether Ciphertext + // actually contains wrapped data or not. This can be useful if you want to + // reuse the same struct to pass data along before and after wrapping. 
+ Wrapped bool `protobuf:"varint,4,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + // KeyInfo contains information about the key that was used to create this value + KeyInfo *KeyInfo `protobuf:"bytes,5,opt,name=key_info,json=keyInfo,proto3" json:"key_info,omitempty"` + // ValuePath can be used by the client to store information about where the + // value came from + ValuePath string `protobuf:"bytes,6,opt,name=ValuePath,proto3" json:"ValuePath,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptedBlobInfo) Reset() { *m = EncryptedBlobInfo{} } +func (m *EncryptedBlobInfo) String() string { return proto.CompactTextString(m) } +func (*EncryptedBlobInfo) ProtoMessage() {} +func (*EncryptedBlobInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0} +} + +func (m *EncryptedBlobInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptedBlobInfo.Unmarshal(m, b) +} +func (m *EncryptedBlobInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptedBlobInfo.Marshal(b, m, deterministic) +} +func (m *EncryptedBlobInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptedBlobInfo.Merge(m, src) +} +func (m *EncryptedBlobInfo) XXX_Size() int { + return xxx_messageInfo_EncryptedBlobInfo.Size(m) +} +func (m *EncryptedBlobInfo) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptedBlobInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptedBlobInfo proto.InternalMessageInfo + +func (m *EncryptedBlobInfo) GetCiphertext() []byte { + if m != nil { + return m.Ciphertext + } + return nil +} + +func (m *EncryptedBlobInfo) GetIV() []byte { + if m != nil { + return m.IV + } + return nil +} + +func (m *EncryptedBlobInfo) GetHMAC() []byte { + if m != nil { + return m.HMAC + } + return nil +} + +func (m *EncryptedBlobInfo) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *EncryptedBlobInfo) GetKeyInfo() *KeyInfo { + if m != nil { + return m.KeyInfo + } + return nil +} + +func (m *EncryptedBlobInfo) GetValuePath() string { + if m != nil { + return m.ValuePath + } + return "" +} + +// KeyInfo contains information regarding which Wrapper key was used to +// encrypt the entry +type KeyInfo struct { + // Mechanism is the method used by the wrapper to encrypt and sign the + // data as defined by the wrapper. + Mechanism uint64 `protobuf:"varint,1,opt,name=Mechanism,proto3" json:"Mechanism,omitempty"` + HMACMechanism uint64 `protobuf:"varint,2,opt,name=HMACMechanism,proto3" json:"HMACMechanism,omitempty"` + // This is an opaque ID used by the wrapper to identify the specific + // key to use as defined by the wrapper. This could be a version, key + // label, or something else. 
+ KeyID string `protobuf:"bytes,3,opt,name=KeyID,proto3" json:"KeyID,omitempty"` + HMACKeyID string `protobuf:"bytes,4,opt,name=HMACKeyID,proto3" json:"HMACKeyID,omitempty"` + // These value are used when generating our own data encryption keys + // and encrypting them using the wrapper + WrappedKey []byte `protobuf:"bytes,5,opt,name=WrappedKey,proto3" json:"WrappedKey,omitempty"` + // Mechanism specific flags + Flags uint64 `protobuf:"varint,6,opt,name=Flags,proto3" json:"Flags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyInfo) Reset() { *m = KeyInfo{} } +func (m *KeyInfo) String() string { return proto.CompactTextString(m) } +func (*KeyInfo) ProtoMessage() {} +func (*KeyInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{1} +} + +func (m *KeyInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyInfo.Unmarshal(m, b) +} +func (m *KeyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyInfo.Marshal(b, m, deterministic) +} +func (m *KeyInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyInfo.Merge(m, src) +} +func (m *KeyInfo) XXX_Size() int { + return xxx_messageInfo_KeyInfo.Size(m) +} +func (m *KeyInfo) XXX_DiscardUnknown() { + xxx_messageInfo_KeyInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyInfo proto.InternalMessageInfo + +func (m *KeyInfo) GetMechanism() uint64 { + if m != nil { + return m.Mechanism + } + return 0 +} + +func (m *KeyInfo) GetHMACMechanism() uint64 { + if m != nil { + return m.HMACMechanism + } + return 0 +} + +func (m *KeyInfo) GetKeyID() string { + if m != nil { + return m.KeyID + } + return "" +} + +func (m *KeyInfo) GetHMACKeyID() string { + if m != nil { + return m.HMACKeyID + } + return "" +} + +func (m *KeyInfo) GetWrappedKey() []byte { + if m != nil { + return m.WrappedKey + } + return nil +} + +func (m *KeyInfo) GetFlags() uint64 { + if m != nil { + return m.Flags + } + return 0 +} + +func init() { + proto.RegisterType((*EncryptedBlobInfo)(nil), "github.com.hashicorp.go.kms.wrapping.types.EncryptedBlobInfo") + proto.RegisterType((*KeyInfo)(nil), "github.com.hashicorp.go.kms.wrapping.types.KeyInfo") +} + +func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } + +var fileDescriptor_d938547f84707355 = []byte{ + // 314 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xbd, 0x4e, 0xfb, 0x30, + 0x14, 0xc5, 0xe5, 0xfc, 0xd3, 0xaf, 0xdb, 0xfe, 0x91, 0xb0, 0x18, 0x3c, 0x20, 0x14, 0x55, 0x0c, + 0x11, 0x83, 0x07, 0xfa, 0x04, 0x94, 0x0f, 0x81, 0xaa, 0x22, 0xe4, 0x01, 0x24, 0x16, 0xe4, 0xa6, + 0x6e, 0x6c, 0xb5, 0x89, 0xad, 0xc4, 0x2d, 0xe4, 0xc9, 0x78, 0x1c, 0x5e, 0x05, 0xd9, 0x56, 0x49, + 0xd9, 0xd8, 0xee, 0xfd, 0xe5, 0xe4, 0xf8, 0x1c, 0x1b, 0x86, 0xb6, 0x31, 0xa2, 0xa6, 0xa6, 0xd2, + 0x56, 0xe3, 0x8b, 0x5c, 0x59, 0xb9, 0x5d, 0xd0, 0x4c, 0x17, 0x54, 0xf2, 0x5a, 0xaa, 0x4c, 0x57, + 0x86, 0xe6, 0x9a, 0xae, 0x8b, 0x9a, 0xbe, 0x57, 0xdc, 0x18, 0x55, 0xe6, 0xd4, 0xff, 0x31, 0xfe, + 0x42, 0x70, 0x7c, 0x5b, 0x66, 0x55, 0x63, 0xac, 0x58, 0x4e, 0x37, 0x7a, 0xf1, 0x50, 0xae, 0x34, + 0x3e, 0x03, 0xc8, 0x94, 0x91, 0xa2, 0xb2, 0xe2, 0xc3, 0x12, 0x94, 0xa0, 0x74, 0xc4, 0x0e, 0x08, + 0x3e, 0x82, 0x48, 0xed, 0x48, 0xe4, 0x79, 0xa4, 0x76, 0x18, 0x43, 0x2c, 0x0b, 0x9e, 0x91, 0x7f, + 0x9e, 0xf8, 0x19, 0x13, 0xe8, 0xf9, 0xb3, 0xc4, 0x92, 0xc4, 0x09, 0x4a, 0xfb, 0x6c, 0xbf, 0xe2, + 0x47, 0xe8, 0xaf, 0x45, 0xf3, 
0xa6, 0xca, 0x95, 0x26, 0x9d, 0x04, 0xa5, 0xc3, 0xcb, 0x09, 0xfd, + 0x7b, 0x64, 0x3a, 0x13, 0x8d, 0x0b, 0xc9, 0x7a, 0xeb, 0x30, 0xe0, 0x53, 0x18, 0x3c, 0xf3, 0xcd, + 0x56, 0x3c, 0x71, 0x2b, 0x49, 0x37, 0x41, 0xe9, 0x80, 0xb5, 0x60, 0xfc, 0x89, 0xa0, 0x37, 0x6b, + 0x95, 0x73, 0x91, 0x49, 0x5e, 0xaa, 0xba, 0xf0, 0xb5, 0x62, 0xd6, 0x02, 0x7c, 0x0e, 0xff, 0xef, + 0xe7, 0x57, 0xd7, 0xad, 0x22, 0xf2, 0x8a, 0xdf, 0x10, 0x9f, 0x40, 0xc7, 0xd9, 0xdd, 0xf8, 0xb2, + 0x03, 0x16, 0x16, 0xe7, 0xec, 0x64, 0xe1, 0x4b, 0x1c, 0x32, 0xfc, 0x00, 0x77, 0x9f, 0x2f, 0xa1, + 0xfc, 0x4c, 0x34, 0xbe, 0xf3, 0x88, 0x1d, 0x10, 0xe7, 0x79, 0xb7, 0xe1, 0x79, 0xed, 0xd3, 0xc7, + 0x2c, 0x2c, 0x53, 0x78, 0xed, 0xef, 0xab, 0x2f, 0xba, 0xfe, 0x69, 0x27, 0xdf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x36, 0x15, 0x1f, 0xd8, 0xe9, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/types.proto b/vendor/github.com/hashicorp/go-kms-wrapping/types.proto new file mode 100644 index 0000000000..c27932affc --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/types.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +option go_package = "wrapping"; + +package github.com.hashicorp.go.kms.wrapping.types; + +// EncryptedBlobInfo contains information about the encrypted value along with +// information about the key used to encrypt it +message EncryptedBlobInfo { + // Ciphertext is the encrypted bytes + bytes ciphertext = 1; + + // IV is the initialization value used during encryption + bytes iv = 2; + + // HMAC is the bytes of the HMAC, if any + bytes hmac = 3; + + // Wrapped can be used by the client to indicate whether Ciphertext + // actually contains wrapped data or not. This can be useful if you want to + // reuse the same struct to pass data along before and after wrapping. + bool wrapped = 4; + + // KeyInfo contains information about the key that was used to create this value + KeyInfo key_info = 5; + + // ValuePath can be used by the client to store information about where the + // value came from + string ValuePath = 6; +} + +// KeyInfo contains information regarding which Wrapper key was used to +// encrypt the entry +message KeyInfo { + // Mechanism is the method used by the wrapper to encrypt and sign the + // data as defined by the wrapper. + uint64 Mechanism = 1; + uint64 HMACMechanism = 2; + + // This is an opaque ID used by the wrapper to identify the specific + // key to use as defined by the wrapper. This could be a version, key + // label, or something else. + string KeyID = 3; + string HMACKeyID = 4; + + // These value are used when generating our own data encryption keys + // and encrypting them using the wrapper + bytes WrappedKey = 5; + + // Mechanism specific flags + uint64 Flags = 6; +} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/wrapper.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrapper.go new file mode 100644 index 0000000000..643b6f9038 --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrapper.go @@ -0,0 +1,51 @@ +package wrapping + +import ( + "context" + + "github.com/hashicorp/go-hclog" +) + +// These values define known types of Wrappers +const ( + Shamir = "shamir" + PKCS11 = "pkcs11" + AliCloudKMS = "alicloudkms" + AWSKMS = "awskms" + GCPCKMS = "gcpckms" + AzureKeyVault = "azurekeyvault" + OCIKMS = "ocikms" + Transit = "transit" + Test = "test-auto" + + // HSMAutoDeprecated is a deprecated type relevant to Vault prior to 0.9.0. 
+ // It is still referenced in certain code paths for upgrade purposes + HSMAutoDeprecated = "hsm-auto" +) + +// Wrapper is the embedded implementation of autoSeal that contains logic +// specific to encrypting and decrypting data, or in this case keys. +type Wrapper interface { + // Type is the type of Wrapper + Type() string + + // KeyID is the ID of the key currently used for encryption + KeyID() string + // HMACKeyID is the ID of the key currently used for HMACing (if any) + HMACKeyID() string + + // Init allows performing any necessary setup calls before using this Wrapper + Init(context.Context) error + // Finalize should be called when all usage of this Wrapper is done + Finalize(context.Context) error + + // Encrypt encrypts the given byte slice and puts information about the final result in the returned value. The second byte slice is to pass any additional authenticated data; this may or may not be used depending on the particular implementation. + Encrypt(context.Context, []byte, []byte) (*EncryptedBlobInfo, error) + // Decrypt takes in the value and decrypts it into the byte slice. The byte slice is to pass any additional authenticated data; this may or may not be used depending on the particular implementation. + Decrypt(context.Context, *EncryptedBlobInfo, []byte) ([]byte, error) +} + +// WrapperOptions contains options used when creating a Wrapper +type WrapperOptions struct { + Logger hclog.Logger +} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/wrapper_testing.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrapper_testing.go new file mode 100644 index 0000000000..e13f0b9d8d --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrapper_testing.go @@ -0,0 +1,102 @@ +package wrapping + +import ( + "context" + + "github.com/hashicorp/go-kms-wrapping/internal/xor" +) + +// TestWrapper is a wrapper that can be used for tests +type TestWrapper struct { + wrapperType string + secret []byte + keyID string +} + +var _ Wrapper = (*TestWrapper)(nil) + +// NewTestWrapper constructs a test wrapper +func NewTestWrapper(secret []byte) *TestWrapper { + return &TestWrapper{ + wrapperType: Test, + secret: secret, + keyID: "static-key", + } +} + +// Init initializes the test wrapper +func (t *TestWrapper) Init(_ context.Context) error { + return nil +} + +// Finalize finalizes the test wrapper +func (t *TestWrapper) Finalize(_ context.Context) error { + return nil +} + +// Type returns the type of the test wrapper +func (t *TestWrapper) Type() string { + return t.wrapperType +} + +// KeyID returns the configured key ID +func (t *TestWrapper) KeyID() string { + return t.keyID +} + +// HMACKeyID returns the configured HMAC key ID +func (t *TestWrapper) HMACKeyID() string { + return "" +} + +// SetKeyID allows setting the test wrapper's key ID +func (t *TestWrapper) SetKeyID(k string) { + t.keyID = k +} + +// Encrypt allows encrypting via the test wrapper +func (t *TestWrapper) Encrypt(_ context.Context, plaintext, _ []byte) (*EncryptedBlobInfo, error) { + ct, err := t.obscureBytes(plaintext) + if err != nil { + return nil, err + } + + return &EncryptedBlobInfo{ + Ciphertext: ct, + KeyInfo: &KeyInfo{ + KeyID: t.KeyID(), + }, + }, nil +} + +// Decrypt allows decrypting via the test wrapper +func (t *TestWrapper) Decrypt(_ context.Context, dwi *EncryptedBlobInfo, _ []byte) ([]byte, error) { + return t.obscureBytes(dwi.Ciphertext) +} + +// obscureBytes is a helper to simulate "encryption/decryption" +// on protected values.
+func (t *TestWrapper) obscureBytes(in []byte) ([]byte, error) { + out := make([]byte, len(in)) + + if len(t.secret) != 0 { + // make sure they are the same length + localSecret := make([]byte, len(in)) + copy(localSecret, t.secret) + + var err error + + out, err = xor.XORBytes(in, localSecret) + if err != nil { + return nil, err + } + + } else { + // if there is no secret, simply reverse the string + for i := 0; i < len(in); i++ { + out[i] = in[len(in)-1-i] + } + } + + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/aead/aead.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/aead/aead.go new file mode 100644 index 0000000000..fdc3f86a2e --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/aead/aead.go @@ -0,0 +1,121 @@ +package aead + +import ( + "context" + "crypto/aes" + "crypto/cipher" + "errors" + + wrapping "github.com/hashicorp/go-kms-wrapping" + "github.com/hashicorp/go-uuid" +) + +// Wrapper implements the wrapping.Wrapper interface for Shamir +type Wrapper struct { + keyBytes []byte + aead cipher.AEAD +} + +// Ensure that we are implementing AutoSealAccess +var _ wrapping.Wrapper = (*Wrapper)(nil) + +// NewWrapper creates a new Wrapper with the provided logger +func NewWrapper(opts *wrapping.WrapperOptions) *Wrapper { + if opts == nil { + opts = new(wrapping.WrapperOptions) + } + seal := new(Wrapper) + return seal +} + +func (s *Wrapper) GetKeyBytes() []byte { + return s.keyBytes +} + +func (s *Wrapper) SetAEAD(aead cipher.AEAD) { + s.aead = aead +} + +// SetAESGCMKeyBytes takes in a byte slice and constructs an AES-GCM AEAD from it +func (s *Wrapper) SetAESGCMKeyBytes(key []byte) error { + aesCipher, err := aes.NewCipher(key) + if err != nil { + return err + } + + aead, err := cipher.NewGCM(aesCipher) + if err != nil { + return err + } + + s.keyBytes = key + s.aead = aead + return nil +} + +// Init is a no-op at the moment +func (s *Wrapper) Init(_ context.Context) error { + return nil +} + +// Finalize is called during shutdown. This is a no-op since +// Wrapper doesn't require any cleanup. +func (s *Wrapper) Finalize(_ context.Context) error { + return nil +} + +// Type returns the seal type for this particular Wrapper implementation +func (s *Wrapper) Type() string { + return wrapping.Shamir +} + +// KeyID returns the last known key id +func (s *Wrapper) KeyID() string { + return "" +} + +// HMACKeyID returns the last known HMAC key id +func (s *Wrapper) HMACKeyID() string { + return "" +} + +// Encrypt is used to encrypt the plaintext using the aead held by the seal.
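// A minimal usage sketch, not part of the vendored file: the aead wrapper is keyed via
// SetAESGCMKeyBytes and then driven through the generic Encrypt/Decrypt methods that
// follow. The 32-byte zero key and the variable names are assumptions made only for
// this example.
//
//	w := aead.NewWrapper(nil)
//	if err := w.SetAESGCMKeyBytes(make([]byte, 32)); err != nil { // any 32-byte key yields AES-256-GCM
//		panic(err)
//	}
//	blob, err := w.Encrypt(context.Background(), []byte("secret"), nil) // nil = no additional authenticated data
//	if err != nil {
//		panic(err)
//	}
//	pt, err := w.Decrypt(context.Background(), blob, nil) // pt == []byte("secret")
//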
+func (s *Wrapper) Encrypt(_ context.Context, plaintext, aad []byte) (*wrapping.EncryptedBlobInfo, error) { + if plaintext == nil { + return nil, errors.New("given plaintext for encryption is nil") + } + + if s.aead == nil { + return nil, errors.New("aead is not configured in the seal") + } + + iv, err := uuid.GenerateRandomBytes(12) + if err != nil { + return nil, err + } + + ciphertext := s.aead.Seal(nil, iv, plaintext, aad) + + return &wrapping.EncryptedBlobInfo{ + Ciphertext: append(iv, ciphertext...), + }, nil +} + +func (s *Wrapper) Decrypt(_ context.Context, in *wrapping.EncryptedBlobInfo, aad []byte) ([]byte, error) { + if in == nil { + return nil, errors.New("given input for decryption is nil") + } + + if s.aead == nil { + return nil, errors.New("aead is not configured in the seal") + } + + iv, ciphertext := in.Ciphertext[:12], in.Ciphertext[12:] + + plaintext, err := s.aead.Open(nil, iv, ciphertext, aad) + if err != nil { + return nil, err + } + + return plaintext, nil +} diff --git a/vault/seal/alicloudkms/alicloudkms.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/alicloudkms.go similarity index 61% rename from vault/seal/alicloudkms/alicloudkms.go rename to vendor/github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/alicloudkms.go index 670d0d1092..3a0b222d50 100644 --- a/vault/seal/alicloudkms/alicloudkms.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/alicloudkms.go @@ -7,63 +7,64 @@ import ( "fmt" "os" "sync/atomic" - "time" - - "github.com/armon/go-metrics" "github.com/aliyun/alibaba-cloud-sdk-go/sdk" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" "github.com/aliyun/alibaba-cloud-sdk-go/services/kms" - "github.com/hashicorp/errwrap" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" + wrapping "github.com/hashicorp/go-kms-wrapping" ) +// These constants contain the accepted env vars; the Vault one is for backwards compat const ( - EnvAliCloudKMSSealKeyID = "VAULT_ALICLOUDKMS_SEAL_KEY_ID" + EnvAliCloudKMSWrapperKeyID = "ALICLOUDKMS_WRAPPER_KEY_ID" + EnvVaultAliCloudKMSSealKeyID = "VAULT_ALICLOUDKMS_SEAL_KEY_ID" ) -type AliCloudKMSSeal struct { - logger log.Logger +// Wrapper is a Wrapper that uses AliCloud's KMS +type Wrapper struct { client kmsClient domain string keyID string currentKeyID *atomic.Value } -// Ensure that we are implementing AutoSealAccess -var _ seal.Access = (*AliCloudKMSSeal)(nil) +// Ensure that we are implementing Wrapper +var _ wrapping.Wrapper = (*Wrapper)(nil) -func NewSeal(logger log.Logger) *AliCloudKMSSeal { - k := &AliCloudKMSSeal{ - logger: logger, +// NewWrapper creates a new AliCloud Wrapper +func NewWrapper(opts *wrapping.WrapperOptions) *Wrapper { + if opts == nil { + opts = new(wrapping.WrapperOptions) + } + k := &Wrapper{ currentKeyID: new(atomic.Value), } k.currentKeyID.Store("") return k } -// SetConfig sets the fields on the AliCloudKMSSeal object based on +// SetConfig sets the fields on the AliCloudKMSWrapper object based on // values from the config parameter.
// // Order of precedence AliCloud values: // * Environment variable // * Value from Vault configuration file // * Instance metadata role (access key and secret key) -func (k *AliCloudKMSSeal) SetConfig(config map[string]string) (map[string]string, error) { +func (k *Wrapper) SetConfig(config map[string]string) (map[string]string, error) { if config == nil { config = map[string]string{} } // Check and set KeyID switch { - case os.Getenv(EnvAliCloudKMSSealKeyID) != "": - k.keyID = os.Getenv(EnvAliCloudKMSSealKeyID) + case os.Getenv(EnvAliCloudKMSWrapperKeyID) != "": + k.keyID = os.Getenv(EnvAliCloudKMSWrapperKeyID) + case os.Getenv(EnvVaultAliCloudKMSSealKeyID) != "": + k.keyID = os.Getenv(EnvVaultAliCloudKMSSealKeyID) case config["kms_key_id"] != "": k.keyID = config["kms_key_id"] default: - return nil, fmt.Errorf("'kms_key_id' not found for AliCloud KMS seal configuration") + return nil, fmt.Errorf("'kms_key_id' not found for AliCloud KMS wrapper configuration") } region := "" @@ -127,7 +128,7 @@ func (k *AliCloudKMSSeal) SetConfig(config map[string]string) (map[string]string keyInfo, err := k.client.DescribeKey(input) if err != nil { - return nil, errwrap.Wrapf("error fetching AliCloud KMS sealkey information: {{err}}", err) + return nil, fmt.Errorf("error fetching AliCloud KMS key information: %w", err) } if keyInfo == nil || keyInfo.KeyMetadata.KeyId == "" { return nil, errors.New("no key information returned") @@ -138,61 +139,53 @@ func (k *AliCloudKMSSeal) SetConfig(config map[string]string) (map[string]string k.currentKeyID.Store(keyInfo.KeyMetadata.KeyId) // Map that holds non-sensitive configuration info - sealInfo := make(map[string]string) - sealInfo["region"] = region - sealInfo["kms_key_id"] = k.keyID + wrapperInfo := make(map[string]string) + wrapperInfo["region"] = region + wrapperInfo["kms_key_id"] = k.keyID if k.domain != "" { - sealInfo["domain"] = k.domain + wrapperInfo["domain"] = k.domain } - return sealInfo, nil + return wrapperInfo, nil } // Init is called during core.Initialize. No-op at the moment. -func (k *AliCloudKMSSeal) Init(_ context.Context) error { +func (k *Wrapper) Init(_ context.Context) error { return nil } // Finalize is called during shutdown. This is a no-op since -// AliCloudKMSSeal doesn't require any cleanup. -func (k *AliCloudKMSSeal) Finalize(_ context.Context) error { +// AliCloudKMSWrapper doesn't require any cleanup. +func (k *Wrapper) Finalize(_ context.Context) error { return nil } -// SealType returns the seal type for this particular seal implementation. -func (k *AliCloudKMSSeal) SealType() string { - return seal.AliCloudKMS +// Type returns the type for this particular wrapper implementation +func (k *Wrapper) Type() string { + return wrapping.AliCloudKMS } -// KeyID returns the last known key id. -func (k *AliCloudKMSSeal) KeyID() string { +// KeyID returns the last known key id +func (k *Wrapper) KeyID() string { return k.currentKeyID.Load().(string) } +// HMACKeyID returns nothing, it's here to satisfy the interface +func (k *Wrapper) HMACKeyID() string { + return "" +} + // Encrypt is used to encrypt the master key using the the AliCloud CMK. // This returns the ciphertext, and/or any errors from this // call. This should be called after the KMS client has been instantiated. 
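// An illustrative sketch, not part of the vendored source, of the envelope pattern the
// KMS-backed wrappers in this change share: the plaintext is first sealed locally with a
// freshly generated data encryption key (DEK) via wrapping.NewEnvelope(nil).Encrypt, only
// the DEK is sent to the remote KMS to be wrapped, and the wrapped DEK is stored next to
// the locally produced ciphertext. The names keyID and wrappedDEK below are placeholders.
//
//	env, err := wrapping.NewEnvelope(nil).Encrypt(plaintext, aad)
//	// ... hand the DEK to the remote KMS, which returns keyID and wrappedDEK ...
//	blob := &wrapping.EncryptedBlobInfo{
//		Ciphertext: env.Ciphertext, // data encrypted locally with the DEK
//		IV:         env.IV,
//		KeyInfo: &wrapping.KeyInfo{
//			KeyID:      keyID,      // KMS key that wrapped the DEK
//			WrappedKey: wrappedDEK, // KMS-encrypted DEK
//		},
//	}
//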
-func (k *AliCloudKMSSeal) Encrypt(_ context.Context, plaintext []byte) (blob *physical.EncryptedBlobInfo, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "alicloudkms", "encrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "encrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "alicloudkms", "encrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "encrypt"}, 1) - metrics.IncrCounter([]string{"seal", "alicloudkms", "encrypt"}, 1) - +func (k *Wrapper) Encrypt(_ context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) { if plaintext == nil { return nil, fmt.Errorf("given plaintext for encryption is nil") } - env, err := seal.NewEnvelope().Encrypt(plaintext) + env, err := wrapping.NewEnvelope(nil).Encrypt(plaintext, aad) if err != nil { - return nil, errwrap.Wrapf("error wrapping data: {{err}}", err) + return nil, fmt.Errorf("error wrapping data: %w", err) } input := kms.CreateEncryptRequest() @@ -202,17 +195,17 @@ func (k *AliCloudKMSSeal) Encrypt(_ context.Context, plaintext []byte) (blob *ph output, err := k.client.Encrypt(input) if err != nil { - return nil, errwrap.Wrapf("error encrypting data: {{err}}", err) + return nil, fmt.Errorf("error encrypting data: %w", err) } // Store the current key id. keyID := output.KeyId k.currentKeyID.Store(keyID) - ret := &physical.EncryptedBlobInfo{ + ret := &wrapping.EncryptedBlobInfo{ Ciphertext: env.Ciphertext, IV: env.IV, - KeyInfo: &physical.SealKeyInfo{ + KeyInfo: &wrapping.KeyInfo{ KeyID: keyID, WrappedKey: []byte(output.CiphertextBlob), }, @@ -222,20 +215,7 @@ func (k *AliCloudKMSSeal) Encrypt(_ context.Context, plaintext []byte) (blob *ph } // Decrypt is used to decrypt the ciphertext. This should be called after Init. 
-func (k *AliCloudKMSSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) (pt []byte, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "alicloudkms", "decrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "decrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "alicloudkms", "decrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "decrypt"}, 1) - metrics.IncrCounter([]string{"seal", "alicloudkms", "decrypt"}, 1) - +func (k *Wrapper) Decrypt(_ context.Context, in *wrapping.EncryptedBlobInfo, aad []byte) (pt []byte, err error) { if in == nil { return nil, fmt.Errorf("given input for decryption is nil") } @@ -248,7 +228,7 @@ func (k *AliCloudKMSSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobI output, err := k.client.Decrypt(input) if err != nil { - return nil, errwrap.Wrapf("error decrypting data encryption key: {{err}}", err) + return nil, fmt.Errorf("error decrypting data encryption key: %w", err) } keyBytes, err := base64.StdEncoding.DecodeString(output.Plaintext) @@ -256,14 +236,14 @@ func (k *AliCloudKMSSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobI return nil, err } - envInfo := &seal.EnvelopeInfo{ + envInfo := &wrapping.EnvelopeInfo{ Key: keyBytes, IV: in.IV, Ciphertext: in.Ciphertext, } - plaintext, err := seal.NewEnvelope().Decrypt(envInfo) + plaintext, err := wrapping.NewEnvelope(nil).Decrypt(envInfo, aad) if err != nil { - return nil, errwrap.Wrapf("error decrypting data: {{err}}", err) + return nil, fmt.Errorf("error decrypting data: %w", err) } return plaintext, nil diff --git a/vault/seal/awskms/awskms.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/awskms/awskms.go similarity index 57% rename from vault/seal/awskms/awskms.go rename to vendor/github.com/hashicorp/go-kms-wrapping/wrappers/awskms/awskms.go index bbd92a2bf5..076bdaf165 100644 --- a/vault/seal/awskms/awskms.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/awskms/awskms.go @@ -6,29 +6,22 @@ import ( "fmt" "os" "sync/atomic" - "time" - metrics "github.com/armon/go-metrics" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/kms/kmsiface" - "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/awsutil" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" + wrapping "github.com/hashicorp/go-kms-wrapping" + "github.com/hashicorp/vault/sdk/helper/awsutil" ) +// These constants contain the accepted env vars; the Vault one is for backwards compat const ( - // EnvAWSKMSSealKeyID is the AWS KMS key ID to use for encryption and decryption - EnvAWSKMSSealKeyID = "VAULT_AWSKMS_SEAL_KEY_ID" + EnvAWSKMSWrapperKeyID = "AWSKMS_WRAPPER_KEY_ID" + EnvVaultAWSKMSSealKeyID = "VAULT_AWSKMS_SEAL_KEY_ID" ) -// AWSKMSMechanism is the method used to encrypt/decrypt in the autoseal -type AWSKMSMechanism uint32 - const ( // AWSKMSEncrypt is used to directly encrypt the data with KMS AWSKMSEncrypt = iota @@ -37,9 +30,9 @@ const ( AWSKMSEnvelopeAESGCMEncrypt ) -// AWSKMSSeal represents credentials and Key information for the KMS Key used to +// Wrapper represents credentials and Key information for the KMS Key used to // encryption and decryption -type AWSKMSSeal struct { +type Wrapper struct { accessKey 
string secretKey string sessionToken string @@ -50,23 +43,24 @@ type AWSKMSSeal struct { currentKeyID *atomic.Value client kmsiface.KMSAPI - logger log.Logger } -// Ensure that we are implementing AutoSealAccess -var _ seal.Access = (*AWSKMSSeal)(nil) +// Ensure that we are implementing Wrapper +var _ wrapping.Wrapper = (*Wrapper)(nil) -// NewSeal creates a new AWSKMS seal with the provided logger -func NewSeal(logger log.Logger) *AWSKMSSeal { - k := &AWSKMSSeal{ - logger: logger, +// NewWrapper creates a new AWSKMS wrapper with the provided options +func NewWrapper(opts *wrapping.WrapperOptions) *Wrapper { + if opts == nil { + opts = new(wrapping.WrapperOptions) + } + k := &Wrapper{ currentKeyID: new(atomic.Value), } k.currentKeyID.Store("") return k } -// SetConfig sets the fields on the AWSKMSSeal object based on +// SetConfig sets the fields on the Wrapper object based on // values from the config parameter. // // Order of precedence AWS values: @@ -74,23 +68,29 @@ func NewSeal(logger log.Logger) *AWSKMSSeal { // * Value from Vault configuration file // * Instance metadata role (access key and secret key) // * Default values -func (k *AWSKMSSeal) SetConfig(config map[string]string) (map[string]string, error) { +func (k *Wrapper) SetConfig(config map[string]string) (map[string]string, error) { if config == nil { config = map[string]string{} } // Check and set KeyID switch { - case os.Getenv(EnvAWSKMSSealKeyID) != "": - k.keyID = os.Getenv(EnvAWSKMSSealKeyID) + case os.Getenv(EnvAWSKMSWrapperKeyID) != "": + k.keyID = os.Getenv(EnvAWSKMSWrapperKeyID) + case os.Getenv(EnvVaultAWSKMSSealKeyID) != "": + k.keyID = os.Getenv(EnvVaultAWSKMSSealKeyID) case config["kms_key_id"] != "": k.keyID = config["kms_key_id"] default: - return nil, fmt.Errorf("'kms_key_id' not found for AWS KMS seal configuration") + return nil, fmt.Errorf("'kms_key_id' not found for AWS KMS wrapper configuration") } - // Please see GetOrDefaultRegion for an explanation of the order in which region is parsed. - k.region = awsutil.GetOrDefaultRegion(k.logger, config["region"]) + // Please see GetRegion for an explanation of the order in which region is parsed. 
+ var err error + k.region, err = awsutil.GetRegion(config["region"]) + if err != nil { + return nil, err + } // Check and set AWS access key, secret key, and session token k.accessKey = config["access_key"] @@ -108,7 +108,7 @@ func (k *AWSKMSSeal) SetConfig(config map[string]string) (map[string]string, err if k.client == nil { client, err := k.getAWSKMSClient() if err != nil { - return nil, errwrap.Wrapf("error initializing AWS KMS sealclient: {{err}}", err) + return nil, fmt.Errorf("error initializing AWS KMS wrapping client: %w", err) } // Test the client connection using provided key ID @@ -116,7 +116,7 @@ func (k *AWSKMSSeal) SetConfig(config map[string]string) (map[string]string, err KeyId: aws.String(k.keyID), }) if err != nil { - return nil, errwrap.Wrapf("error fetching AWS KMS sealkey information: {{err}}", err) + return nil, fmt.Errorf("error fetching AWS KMS wrapping key information: %w", err) } if keyInfo == nil || keyInfo.KeyMetadata == nil || keyInfo.KeyMetadata.KeyId == nil { return nil, errors.New("no key information returned") @@ -127,61 +127,53 @@ func (k *AWSKMSSeal) SetConfig(config map[string]string) (map[string]string, err } // Map that holds non-sensitive configuration info - sealInfo := make(map[string]string) - sealInfo["region"] = k.region - sealInfo["kms_key_id"] = k.keyID + wrappingInfo := make(map[string]string) + wrappingInfo["region"] = k.region + wrappingInfo["kms_key_id"] = k.keyID if k.endpoint != "" { - sealInfo["endpoint"] = k.endpoint + wrappingInfo["endpoint"] = k.endpoint } - return sealInfo, nil + return wrappingInfo, nil } // Init is called during core.Initialize. No-op at the moment. -func (k *AWSKMSSeal) Init(_ context.Context) error { +func (k *Wrapper) Init(_ context.Context) error { return nil } // Finalize is called during shutdown. This is a no-op since -// AWSKMSSeal doesn't require any cleanup. -func (k *AWSKMSSeal) Finalize(_ context.Context) error { +// Wrapper doesn't require any cleanup. +func (k *Wrapper) Finalize(_ context.Context) error { return nil } -// SealType returns the seal type for this particular seal implementation. -func (k *AWSKMSSeal) SealType() string { - return seal.AWSKMS +// Type returns the wrapping type for this particular Wrapper implementation +func (k *Wrapper) Type() string { + return wrapping.AWSKMS } -// KeyID returns the last known key id. -func (k *AWSKMSSeal) KeyID() string { +// KeyID returns the last known key id +func (k *Wrapper) KeyID() string { return k.currentKeyID.Load().(string) } +// HMACKeyID returns the last known HMAC key id +func (k *Wrapper) HMACKeyID() string { + return "" +} + // Encrypt is used to encrypt the master key using the the AWS CMK. // This returns the ciphertext, and/or any errors from this // call. This should be called after the KMS client has been instantiated. 
-func (k *AWSKMSSeal) Encrypt(_ context.Context, plaintext []byte) (blob *physical.EncryptedBlobInfo, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "awskms", "encrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "encrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "awskms", "encrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "encrypt"}, 1) - metrics.IncrCounter([]string{"seal", "awskms", "encrypt"}, 1) - +func (k *Wrapper) Encrypt(_ context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) { if plaintext == nil { return nil, fmt.Errorf("given plaintext for encryption is nil") } - env, err := seal.NewEnvelope().Encrypt(plaintext) + env, err := wrapping.NewEnvelope(nil).Encrypt(plaintext, aad) if err != nil { - return nil, errwrap.Wrapf("error wrapping data: {{err}}", err) + return nil, fmt.Errorf("error wrapping data: %w", err) } if k.client == nil { @@ -194,17 +186,17 @@ func (k *AWSKMSSeal) Encrypt(_ context.Context, plaintext []byte) (blob *physica } output, err := k.client.Encrypt(input) if err != nil { - return nil, errwrap.Wrapf("error encrypting data: {{err}}", err) + return nil, fmt.Errorf("error encrypting data: %w", err) } // store the current key id keyID := aws.StringValue(output.KeyId) k.currentKeyID.Store(keyID) - ret := &physical.EncryptedBlobInfo{ + ret := &wrapping.EncryptedBlobInfo{ Ciphertext: env.Ciphertext, IV: env.IV, - KeyInfo: &physical.SealKeyInfo{ + KeyInfo: &wrapping.KeyInfo{ Mechanism: AWSKMSEnvelopeAESGCMEncrypt, // Even though we do not use the key id during decryption, store it // to know exactly the specific key used in encryption in case we @@ -218,27 +210,14 @@ func (k *AWSKMSSeal) Encrypt(_ context.Context, plaintext []byte) (blob *physica } // Decrypt is used to decrypt the ciphertext. This should be called after Init. 
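// A short illustrative note, not from the vendored source: decryption dispatches on the
// mechanism recorded in the blob at encryption time, falling back to the legacy
// direct-KMS path when no KeyInfo was stored. Roughly:
//
//	switch in.KeyInfo.Mechanism {
//	case AWSKMSEncrypt:
//		// legacy path: the ciphertext itself was encrypted directly by KMS
//	case AWSKMSEnvelopeAESGCMEncrypt:
//		// envelope path: KMS unwraps in.KeyInfo.WrappedKey back into the DEK,
//		// then wrapping.NewEnvelope(nil).Decrypt opens in.Ciphertext locally
//	}
//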
-func (k *AWSKMSSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) (pt []byte, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "awskms", "decrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "decrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "awskms", "decrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "decrypt"}, 1) - metrics.IncrCounter([]string{"seal", "awskms", "decrypt"}, 1) - +func (k *Wrapper) Decrypt(_ context.Context, in *wrapping.EncryptedBlobInfo, aad []byte) (pt []byte, err error) { if in == nil { return nil, fmt.Errorf("given input for decryption is nil") } // Default to mechanism used before key info was stored if in.KeyInfo == nil { - in.KeyInfo = &physical.SealKeyInfo{ + in.KeyInfo = &wrapping.KeyInfo{ Mechanism: AWSKMSEncrypt, } } @@ -252,7 +231,7 @@ func (k *AWSKMSSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) output, err := k.client.Decrypt(input) if err != nil { - return nil, errwrap.Wrapf("error decrypting data: {{err}}", err) + return nil, fmt.Errorf("error decrypting data: %w", err) } plaintext = output.Plaintext @@ -264,17 +243,17 @@ func (k *AWSKMSSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) } output, err := k.client.Decrypt(input) if err != nil { - return nil, errwrap.Wrapf("error decrypting data encryption key: {{err}}", err) + return nil, fmt.Errorf("error decrypting data encryption key: %w", err) } - envInfo := &seal.EnvelopeInfo{ + envInfo := &wrapping.EnvelopeInfo{ Key: output.Plaintext, IV: in.IV, Ciphertext: in.Ciphertext, } - plaintext, err = seal.NewEnvelope().Decrypt(envInfo) + plaintext, err = wrapping.NewEnvelope(nil).Decrypt(envInfo, aad) if err != nil { - return nil, errwrap.Wrapf("error decrypting data: {{err}}", err) + return nil, fmt.Errorf("error decrypting data: %w", err) } default: @@ -285,7 +264,7 @@ func (k *AWSKMSSeal) Decrypt(_ context.Context, in *physical.EncryptedBlobInfo) } // getAWSKMSClient returns an instance of the KMS client. -func (k *AWSKMSSeal) getAWSKMSClient() (*kms.KMS, error) { +func (k *Wrapper) getAWSKMSClient() (*kms.KMS, error) { credsConfig := &awsutil.CredentialsConfig{} credsConfig.AccessKey = k.accessKey diff --git a/vault/seal/awskms/testing.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/awskms/testing.go similarity index 70% rename from vault/seal/awskms/testing.go rename to vendor/github.com/hashicorp/go-kms-wrapping/wrappers/awskms/testing.go index 252e11e836..756cd8526d 100644 --- a/vault/seal/awskms/testing.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/awskms/testing.go @@ -7,27 +7,25 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/kms/kmsiface" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" ) const awsTestKeyID = "foo" -func NewAWSKMSTestSeal() *AWSKMSSeal { - s := NewSeal(logging.NewVaultLogger(log.Trace)) - s.client = &mockAWSKMSSealClient{ +func NewAWSKMSTestWrapper() *Wrapper { + s := NewWrapper(nil) + s.client = &mockClient{ keyID: aws.String(awsTestKeyID), } return s } -type mockAWSKMSSealClient struct { +type mockClient struct { kmsiface.KMSAPI keyID *string } // Encrypt is a mocked call that returns a base64 encoded string. 
-func (m *mockAWSKMSSealClient) Encrypt(input *kms.EncryptInput) (*kms.EncryptOutput, error) { +func (m *mockClient) Encrypt(input *kms.EncryptInput) (*kms.EncryptOutput, error) { m.keyID = input.KeyId encoded := make([]byte, base64.StdEncoding.EncodedLen(len(input.Plaintext))) @@ -40,7 +38,7 @@ func (m *mockAWSKMSSealClient) Encrypt(input *kms.EncryptInput) (*kms.EncryptOut } // Decrypt is a mocked call that returns a decoded base64 string. -func (m *mockAWSKMSSealClient) Decrypt(input *kms.DecryptInput) (*kms.DecryptOutput, error) { +func (m *mockClient) Decrypt(input *kms.DecryptInput) (*kms.DecryptOutput, error) { decLen := base64.StdEncoding.DecodedLen(len(input.CiphertextBlob)) decoded := make([]byte, decLen) len, err := base64.StdEncoding.Decode(decoded, input.CiphertextBlob) @@ -59,7 +57,7 @@ func (m *mockAWSKMSSealClient) Decrypt(input *kms.DecryptInput) (*kms.DecryptOut } // DescribeKey is a mocked call that returns the keyID. -func (m *mockAWSKMSSealClient) DescribeKey(input *kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) { +func (m *mockClient) DescribeKey(input *kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) { if m.keyID == nil { return nil, awserr.New(kms.ErrCodeNotFoundException, "key not found", nil) } diff --git a/vault/seal/azurekeyvault/azurekeyvault.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/azurekeyvault.go similarity index 61% rename from vault/seal/azurekeyvault/azurekeyvault.go rename to vendor/github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/azurekeyvault.go index a3614158c0..5dbccbfe12 100644 --- a/vault/seal/azurekeyvault/azurekeyvault.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/azurekeyvault.go @@ -8,27 +8,30 @@ import ( "os" "strings" "sync/atomic" - "time" - "github.com/armon/go-metrics" - - "github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault" + "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/Azure/go-autorest/autorest/to" - "github.com/hashicorp/errwrap" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" + + wrapping "github.com/hashicorp/go-kms-wrapping" ) -// AzureKeyVaultSeal is an auto-seal that uses Azure Key Vault +const ( + EnvAzureKeyVaultWrapperVaultName = "AZUREKEYVAULT_WRAPPER_VAULT_NAME" + EnvVaultAzureKeyVaultVaultName = "VAULT_AZUREKEYVAULT_VAULT_NAME" + + EnvAzureKeyVaultWrapperKeyName = "AZUREKEYVAULT_WRAPPER_KEY_NAME" + EnvVaultAzureKeyVaultKeyName = "VAULT_AZUREKEYVAULT_KEY_NAME" +) + +// Wrapper is an Wrapper that uses Azure Key Vault // for crypto operations. Azure Key Vault currently does not support // keys that can encrypt long data (RSA keys). 
Due to this fact, we generate // and AES key and wrap the key using Key Vault and store it with the // data -type AzureKeyVaultSeal struct { +type Wrapper struct { tenantID string clientID string clientSecret string @@ -39,30 +42,31 @@ type AzureKeyVaultSeal struct { environment azure.Environment client *keyvault.BaseClient - - logger log.Logger } -// Ensure that we are implementing AutoSealAccess -var _ seal.Access = (*AzureKeyVaultSeal)(nil) +// Ensure that we are implementing Wrapper +var _ wrapping.Wrapper = (*Wrapper)(nil) -func NewSeal(logger log.Logger) *AzureKeyVaultSeal { - v := &AzureKeyVaultSeal{ - logger: logger, +// NewWrapper creates a new wrapper with the given options +func NewWrapper(opts *wrapping.WrapperOptions) *Wrapper { + if opts == nil { + opts = new(wrapping.WrapperOptions) + } + v := &Wrapper{ currentKeyID: new(atomic.Value), } v.currentKeyID.Store("") return v } -// SetConfig sets the fields on the AzureKeyVaultSeal object based on +// SetConfig sets the fields on the Wrapper object based on // values from the config parameter. // // Order of precedence: // * Environment variable // * Value from Vault configuration file // * Managed Service Identity for instance -func (v *AzureKeyVaultSeal) SetConfig(config map[string]string) (map[string]string, error) { +func (v *Wrapper) SetConfig(config map[string]string) (map[string]string, error) { if config == nil { config = map[string]string{} } @@ -103,8 +107,10 @@ func (v *AzureKeyVaultSeal) SetConfig(config map[string]string) (map[string]stri } switch { - case os.Getenv("VAULT_AZUREKEYVAULT_VAULT_NAME") != "": - v.vaultName = os.Getenv("VAULT_AZUREKEYVAULT_VAULT_NAME") + case os.Getenv(EnvAzureKeyVaultWrapperVaultName) != "": + v.vaultName = os.Getenv(EnvAzureKeyVaultWrapperVaultName) + case os.Getenv(EnvVaultAzureKeyVaultVaultName) != "": + v.vaultName = os.Getenv(EnvVaultAzureKeyVaultVaultName) case config["vault_name"] != "": v.vaultName = config["vault_name"] default: @@ -112,8 +118,10 @@ func (v *AzureKeyVaultSeal) SetConfig(config map[string]string) (map[string]stri } switch { - case os.Getenv("VAULT_AZUREKEYVAULT_KEY_NAME") != "": - v.keyName = os.Getenv("VAULT_AZUREKEYVAULT_KEY_NAME") + case os.Getenv(EnvAzureKeyVaultWrapperKeyName) != "": + v.keyName = os.Getenv(EnvAzureKeyVaultWrapperKeyName) + case os.Getenv(EnvVaultAzureKeyVaultKeyName) != "": + v.keyName = os.Getenv(EnvVaultAzureKeyVaultKeyName) case config["key_name"] != "": v.keyName = config["key_name"] default: @@ -123,13 +131,13 @@ func (v *AzureKeyVaultSeal) SetConfig(config map[string]string) (map[string]stri if v.client == nil { client, err := v.getKeyVaultClient() if err != nil { - return nil, errwrap.Wrapf("error initializing Azure Key Vault seal client: {{err}}", err) + return nil, fmt.Errorf("error initializing Azure Key Vault wrapper client: %w", err) } // Test the client connection using provided key ID keyInfo, err := client.GetKey(context.Background(), v.buildBaseURL(), v.keyName, "") if err != nil { - return nil, errwrap.Wrapf("error fetching Azure Key Vault seal key information: {{err}}", err) + return nil, fmt.Errorf("error fetching Azure Key Vault wrapper key information: %w", err) } if keyInfo.Key == nil { return nil, errors.New("no key information returned") @@ -140,58 +148,50 @@ func (v *AzureKeyVaultSeal) SetConfig(config map[string]string) (map[string]stri } // Map that holds non-sensitive configuration info - sealInfo := make(map[string]string) - sealInfo["environment"] = v.environment.Name - sealInfo["vault_name"] = v.vaultName - 
sealInfo["key_name"] = v.keyName + wrapperInfo := make(map[string]string) + wrapperInfo["environment"] = v.environment.Name + wrapperInfo["vault_name"] = v.vaultName + wrapperInfo["key_name"] = v.keyName - return sealInfo, nil + return wrapperInfo, nil } // Init is called during core.Initialize. This is a no-op. -func (v *AzureKeyVaultSeal) Init(context.Context) error { +func (v *Wrapper) Init(context.Context) error { return nil } // Finalize is called during shutdown. This is a no-op. -func (v *AzureKeyVaultSeal) Finalize(context.Context) error { +func (v *Wrapper) Finalize(context.Context) error { return nil } -// SealType returns the seal type for this particular seal implementation. -func (v *AzureKeyVaultSeal) SealType() string { - return seal.AzureKeyVault +// Type returns the type for this particular Wrapper implementation +func (v *Wrapper) Type() string { + return wrapping.AzureKeyVault } -// KeyID returns the last known key id. -func (v *AzureKeyVaultSeal) KeyID() string { +// KeyID returns the last known key id +func (v *Wrapper) KeyID() string { return v.currentKeyID.Load().(string) } +// HMACKeyID returns the last known HMAC key id +func (v *Wrapper) HMACKeyID() string { + return "" +} + // Encrypt is used to encrypt using Azure Key Vault. // This returns the ciphertext, and/or any errors from this // call. -func (v *AzureKeyVaultSeal) Encrypt(ctx context.Context, plaintext []byte) (blob *physical.EncryptedBlobInfo, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "azurekeyvault", "encrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "encrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "azurekeyvault", "encrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "encrypt"}, 1) - metrics.IncrCounter([]string{"seal", "azurekeyvault", "encrypt"}, 1) - +func (v *Wrapper) Encrypt(ctx context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) { if plaintext == nil { return nil, errors.New("given plaintext for encryption is nil") } - env, err := seal.NewEnvelope().Encrypt(plaintext) + env, err := wrapping.NewEnvelope(nil).Encrypt(plaintext, aad) if err != nil { - return nil, errwrap.Wrapf("error wrapping dat: {{err}}", err) + return nil, fmt.Errorf("error wrapping dat: %w", err) } // Encrypt the DEK using Key Vault @@ -209,10 +209,10 @@ func (v *AzureKeyVaultSeal) Encrypt(ctx context.Context, plaintext []byte) (blob keyVersion := parseKeyVersion(to.String(resp.Kid)) v.currentKeyID.Store(keyVersion) - ret := &physical.EncryptedBlobInfo{ + ret := &wrapping.EncryptedBlobInfo{ Ciphertext: env.Ciphertext, IV: env.IV, - KeyInfo: &physical.SealKeyInfo{ + KeyInfo: &wrapping.KeyInfo{ KeyID: keyVersion, WrappedKey: []byte(to.String(resp.Result)), }, @@ -221,21 +221,8 @@ func (v *AzureKeyVaultSeal) Encrypt(ctx context.Context, plaintext []byte) (blob return ret, nil } -// Decrypt is used to decrypt the ciphertext. 
-func (v *AzureKeyVaultSeal) Decrypt(ctx context.Context, in *physical.EncryptedBlobInfo) (pt []byte, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "azurekeyvault", "decrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "decrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "azurekeyvault", "decrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "decrypt"}, 1) - metrics.IncrCounter([]string{"seal", "azurekeyvault", "decrypt"}, 1) - +// Decrypt is used to decrypt the ciphertext +func (v *Wrapper) Decrypt(ctx context.Context, in *wrapping.EncryptedBlobInfo, aad []byte) (pt []byte, err error) { if in == nil { return nil, errors.New("given input for decryption is nil") } @@ -258,19 +245,19 @@ func (v *AzureKeyVaultSeal) Decrypt(ctx context.Context, in *physical.EncryptedB if err != nil { return nil, err } - envInfo := &seal.EnvelopeInfo{ + envInfo := &wrapping.EnvelopeInfo{ Key: keyBytes, IV: in.IV, Ciphertext: in.Ciphertext, } - return seal.NewEnvelope().Decrypt(envInfo) + return wrapping.NewEnvelope(nil).Decrypt(envInfo, aad) } -func (v *AzureKeyVaultSeal) buildBaseURL() string { +func (v *Wrapper) buildBaseURL() string { return fmt.Sprintf("https://%s.%s/", v.vaultName, v.environment.KeyVaultDNSSuffix) } -func (v *AzureKeyVaultSeal) getKeyVaultClient() (*keyvault.BaseClient, error) { +func (v *Wrapper) getKeyVaultClient() (*keyvault.BaseClient, error) { var authorizer autorest.Authorizer var err error diff --git a/vault/seal/gcpckms/gcpckms.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/gcpckms.go similarity index 51% rename from vault/seal/gcpckms/gcpckms.go rename to vendor/github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/gcpckms.go index d2e5afbfd0..b019dd70dd 100644 --- a/vault/seal/gcpckms/gcpckms.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/gcpckms.go @@ -5,16 +5,9 @@ import ( "fmt" "os" "sync/atomic" - "time" - - "github.com/armon/go-metrics" cloudkms "cloud.google.com/go/kms/apiv1" - "github.com/hashicorp/errwrap" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/useragent" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" + wrapping "github.com/hashicorp/go-kms-wrapping" context "golang.org/x/net/context" "google.golang.org/api/option" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" @@ -22,18 +15,17 @@ import ( const ( // General GCP values, follows TF naming conventions - EnvGCPCKMSSealCredsPath = "GOOGLE_CREDENTIALS" - EnvGCPCKMSSealProject = "GOOGLE_PROJECT" - EnvGCPCKMSSealLocation = "GOOGLE_REGION" + EnvGCPCKMSWrapperCredsPath = "GOOGLE_CREDENTIALS" + EnvGCPCKMSWrapperProject = "GOOGLE_PROJECT" + EnvGCPCKMSWrapperLocation = "GOOGLE_REGION" // CKMS-specific values - EnvGCPCKMSSealKeyRing = "VAULT_GCPCKMS_SEAL_KEY_RING" - EnvGCPCKMSSealCryptoKey = "VAULT_GCPCKMS_SEAL_CRYPTO_KEY" + EnvGCPCKMSWrapperKeyRing = "GCPCKMS_WRAPPER_KEY_RING" + EnvVaultGCPCKMSSealKeyRing = "VAULT_GCPCKMS_SEAL_KEY_RING" + EnvGCPCKMSWrapperCryptoKey = "GCPCKMS_WRAPPER_CRYPTO_KEY" + EnvVaultGCPCKMSSealCryptoKey = "VAULT_GCPCKMS_SEAL_CRYPTO_KEY" ) -// GCPKMSMechanism is the method used to encrypt/decrypt in the autoseal -type GCPKMSMechanism uint32 - const ( // GCPKMSEncrypt is used to directly encrypt the data with KMS GCPKMSEncrypt = iota @@ -42,7 +34,7 @@ const ( GCPKMSEnvelopeAESGCMEncrypt ) -type GCPCKMSSeal struct 
{ +type Wrapper struct { // Values specific to IAM credsPath string // Path to the creds file generated during service account creation @@ -53,82 +45,91 @@ type GCPCKMSSeal struct { cryptoKey string parentName string // Parent path built from the above values + userAgent string + currentKeyID *atomic.Value client *cloudkms.KeyManagementClient - logger log.Logger } -var _ seal.Access = (*GCPCKMSSeal)(nil) +var _ wrapping.Wrapper = (*Wrapper)(nil) -func NewSeal(logger log.Logger) *GCPCKMSSeal { - s := &GCPCKMSSeal{ - logger: logger, +func NewWrapper(opts *wrapping.WrapperOptions) *Wrapper { + if opts == nil { + opts = new(wrapping.WrapperOptions) + } + s := &Wrapper{ currentKeyID: new(atomic.Value), } s.currentKeyID.Store("") return s } -// SetConfig sets the fields on the GCPCKMSSeal object based on values from the +// SetConfig sets the fields on the Wrapper object based on values from the // config parameter. Environment variables take precedence over values provided -// in the Vault configuration file (i.e. values in the `seal "gcpckms"` stanza). +// in the config struct. // // Order of precedence for GCP credentials file: // * GOOGLE_CREDENTIALS environment variable // * `credentials` value from Value configuration file // * GOOGLE_APPLICATION_CREDENTIALS (https://developers.google.com/identity/protocols/application-default-credentials) -func (s *GCPCKMSSeal) SetConfig(config map[string]string) (map[string]string, error) { +func (s *Wrapper) SetConfig(config map[string]string) (map[string]string, error) { if config == nil { config = map[string]string{} } + s.userAgent = config["user_agent"] + // Do not return an error in this case. Let client initialization in // getClient() attempt to sort out where to get default credentials internally // within the SDK (e.g. checking for GOOGLE_APPLICATION_CREDENTIALS), and let // it error out there if none is found. This is here to establish precedence on // non-default input methods. 
switch { - case os.Getenv(EnvGCPCKMSSealCredsPath) != "": - s.credsPath = os.Getenv(EnvGCPCKMSSealCredsPath) + case os.Getenv(EnvGCPCKMSWrapperCredsPath) != "": + s.credsPath = os.Getenv(EnvGCPCKMSWrapperCredsPath) case config["credentials"] != "": s.credsPath = config["credentials"] } switch { - case os.Getenv(EnvGCPCKMSSealProject) != "": - s.project = os.Getenv(EnvGCPCKMSSealProject) + case os.Getenv(EnvGCPCKMSWrapperProject) != "": + s.project = os.Getenv(EnvGCPCKMSWrapperProject) case config["project"] != "": s.project = config["project"] default: - return nil, errors.New("'project' not found for GCP CKMS seal configuration") + return nil, errors.New("'project' not found for GCP CKMS wrapper configuration") } switch { - case os.Getenv(EnvGCPCKMSSealLocation) != "": - s.location = os.Getenv(EnvGCPCKMSSealLocation) + case os.Getenv(EnvGCPCKMSWrapperLocation) != "": + s.location = os.Getenv(EnvGCPCKMSWrapperLocation) case config["region"] != "": s.location = config["region"] default: - return nil, errors.New("'region' not found for GCP CKMS seal configuration") + return nil, errors.New("'region' not found for GCP CKMS wrapper configuration") } switch { - case os.Getenv(EnvGCPCKMSSealKeyRing) != "": - s.keyRing = os.Getenv(EnvGCPCKMSSealKeyRing) + case os.Getenv(EnvGCPCKMSWrapperKeyRing) != "": + s.keyRing = os.Getenv(EnvGCPCKMSWrapperKeyRing) + case os.Getenv(EnvVaultGCPCKMSSealKeyRing) != "": + s.keyRing = os.Getenv(EnvVaultGCPCKMSSealKeyRing) case config["key_ring"] != "": s.keyRing = config["key_ring"] default: - return nil, errors.New("'key_ring' not found for GCP CKMS seal configuration") + return nil, errors.New("'key_ring' not found for GCP CKMS wrapper configuration") } switch { - case os.Getenv(EnvGCPCKMSSealCryptoKey) != "": - s.cryptoKey = os.Getenv(EnvGCPCKMSSealCryptoKey) + case os.Getenv(EnvGCPCKMSWrapperCryptoKey) != "": + s.cryptoKey = os.Getenv(EnvGCPCKMSWrapperCryptoKey) + case os.Getenv(EnvVaultGCPCKMSSealCryptoKey) != "": + s.cryptoKey = os.Getenv(EnvVaultGCPCKMSSealCryptoKey) case config["crypto_key"] != "": s.cryptoKey = config["crypto_key"] default: - return nil, errors.New("'crypto_key' not found for GCP CKMS seal configuration") + return nil, errors.New("'crypto_key' not found for GCP CKMS wrapper configuration") } // Set the parent name for encrypt/decrypt requests @@ -138,74 +139,66 @@ func (s *GCPCKMSSeal) SetConfig(config map[string]string) (map[string]string, er if s.client == nil { kmsClient, err := s.getClient() if err != nil { - return nil, errwrap.Wrapf("error initializing GCP CKMS seal client: {{err}}", err) + return nil, fmt.Errorf("error initializing GCP CKMS wrapper client: %w", err) } s.client = kmsClient // Make sure user has permissions to encrypt (also checks if key exists) ctx := context.Background() - if _, err := s.Encrypt(ctx, []byte("vault-gcpckms-test")); err != nil { - return nil, errwrap.Wrapf("failed to encrypt with GCP CKMS - ensure the "+ + if _, err := s.Encrypt(ctx, []byte("vault-gcpckms-test"), nil); err != nil { + return nil, fmt.Errorf("failed to encrypt with GCP CKMS - ensure the "+ "key exists and the service account has at least "+ - "roles/cloudkms.cryptoKeyEncrypterDecrypter permission: {{err}}", err) + "roles/cloudkms.cryptoKeyEncrypterDecrypter permission: %w", err) } } // Map that holds non-sensitive configuration info to return - sealInfo := make(map[string]string) - sealInfo["project"] = s.project - sealInfo["region"] = s.location - sealInfo["key_ring"] = s.keyRing - sealInfo["crypto_key"] = s.cryptoKey + wrapperInfo := 
make(map[string]string) + wrapperInfo["project"] = s.project + wrapperInfo["region"] = s.location + wrapperInfo["key_ring"] = s.keyRing + wrapperInfo["crypto_key"] = s.cryptoKey - return sealInfo, nil + return wrapperInfo, nil } -// Init is called during core.Initialize. No-op at the moment. -func (s *GCPCKMSSeal) Init(_ context.Context) error { +// Init is called during core.Initialize. No-op at the moment +func (s *Wrapper) Init(_ context.Context) error { return nil } // Finalize is called during shutdown. This is a no-op since -// GCPKMSSeal doesn't require any cleanup. -func (s *GCPCKMSSeal) Finalize(_ context.Context) error { +// Wrapper doesn't require any cleanup. +func (s *Wrapper) Finalize(_ context.Context) error { return nil } -// SealType returns the seal type for this particular seal implementation. -func (s *GCPCKMSSeal) SealType() string { - return seal.GCPCKMS +// Type returns the type for this particular wrapper implementation +func (s *Wrapper) Type() string { + return wrapping.GCPCKMS } -// KeyID returns the last known key id. -func (s *GCPCKMSSeal) KeyID() string { +// KeyID returns the last known key id +func (s *Wrapper) KeyID() string { return s.currentKeyID.Load().(string) } +// HMACKeyID returns the last known key id +func (s *Wrapper) HMACKeyID() string { + return "" +} + // Encrypt is used to encrypt the master key using the the AWS CMK. // This returns the ciphertext, and/or any errors from this // call. This should be called after s.client has been instantiated. -func (s *GCPCKMSSeal) Encrypt(ctx context.Context, plaintext []byte) (blob *physical.EncryptedBlobInfo, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "gcpckms", "encrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "encrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "gcpckms", "encrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "encrypt"}, 1) - metrics.IncrCounter([]string{"seal", "gcpckms", "encrypt"}, 1) - +func (s *Wrapper) Encrypt(ctx context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) { if plaintext == nil { return nil, errors.New("given plaintext for encryption is nil") } - env, err := seal.NewEnvelope().Encrypt(plaintext) + env, err := wrapping.NewEnvelope(nil).Encrypt(plaintext, aad) if err != nil { - return nil, errwrap.Wrapf("error wrapping data: {{err}}", err) + return nil, fmt.Errorf("error wrapping data: %w", err) } resp, err := s.client.Encrypt(ctx, &kmspb.EncryptRequest{ @@ -219,10 +212,10 @@ func (s *GCPCKMSSeal) Encrypt(ctx context.Context, plaintext []byte) (blob *phys // Store current key id value s.currentKeyID.Store(resp.Name) - ret := &physical.EncryptedBlobInfo{ + ret := &wrapping.EncryptedBlobInfo{ Ciphertext: env.Ciphertext, IV: env.IV, - KeyInfo: &physical.SealKeyInfo{ + KeyInfo: &wrapping.KeyInfo{ Mechanism: GCPKMSEnvelopeAESGCMEncrypt, // Even though we do not use the key id during decryption, store it // to know exactly what version was used in encryption in case we @@ -236,27 +229,14 @@ func (s *GCPCKMSSeal) Encrypt(ctx context.Context, plaintext []byte) (blob *phys } // Decrypt is used to decrypt the ciphertext. 
-func (s *GCPCKMSSeal) Decrypt(ctx context.Context, in *physical.EncryptedBlobInfo) (pt []byte, err error) { - defer func(now time.Time) { - metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now) - metrics.MeasureSince([]string{"seal", "gcpckms", "decrypt", "time"}, now) - - if err != nil { - metrics.IncrCounter([]string{"seal", "decrypt", "error"}, 1) - metrics.IncrCounter([]string{"seal", "gcpckms", "decrypt", "error"}, 1) - } - }(time.Now()) - - metrics.IncrCounter([]string{"seal", "decrypt"}, 1) - metrics.IncrCounter([]string{"seal", "gcpckms", "decrypt"}, 1) - +func (s *Wrapper) Decrypt(ctx context.Context, in *wrapping.EncryptedBlobInfo, aad []byte) (pt []byte, err error) { if in.Ciphertext == nil { return nil, fmt.Errorf("given ciphertext for decryption is nil") } // Default to mechanism used before key info was stored if in.KeyInfo == nil { - in.KeyInfo = &physical.SealKeyInfo{ + in.KeyInfo = &wrapping.KeyInfo{ Mechanism: GCPKMSEncrypt, } } @@ -269,7 +249,7 @@ func (s *GCPCKMSSeal) Decrypt(ctx context.Context, in *physical.EncryptedBlobInf Ciphertext: in.Ciphertext, }) if err != nil { - return nil, errwrap.Wrapf("failed to decrypt data: {{err}}", err) + return nil, fmt.Errorf("failed to decrypt data: %w", err) } plaintext = resp.Plaintext @@ -280,17 +260,17 @@ func (s *GCPCKMSSeal) Decrypt(ctx context.Context, in *physical.EncryptedBlobInf Ciphertext: in.KeyInfo.WrappedKey, }) if err != nil { - return nil, errwrap.Wrapf("failed to decrypt envelope: {{err}}", err) + return nil, fmt.Errorf("failed to decrypt envelope: %w", err) } - envInfo := &seal.EnvelopeInfo{ + envInfo := &wrapping.EnvelopeInfo{ Key: resp.Plaintext, IV: in.IV, Ciphertext: in.Ciphertext, } - plaintext, err = seal.NewEnvelope().Decrypt(envInfo) + plaintext, err = wrapping.NewEnvelope(nil).Decrypt(envInfo, aad) if err != nil { - return nil, errwrap.Wrapf("error decrypting data with envelope: {{err}}", err) + return nil, fmt.Errorf("error decrypting data with envelope: %w", err) } default: @@ -300,13 +280,13 @@ func (s *GCPCKMSSeal) Decrypt(ctx context.Context, in *physical.EncryptedBlobInf return plaintext, nil } -func (s *GCPCKMSSeal) getClient() (*cloudkms.KeyManagementClient, error) { +func (s *Wrapper) getClient() (*cloudkms.KeyManagementClient, error) { client, err := cloudkms.NewKeyManagementClient(context.Background(), option.WithCredentialsFile(s.credsPath), - option.WithUserAgent(useragent.String()), + option.WithUserAgent(s.userAgent), ) if err != nil { - return nil, errwrap.Wrapf("failed to create KMS client: {{err}}", err) + return nil, fmt.Errorf("failed to create KMS client: %w", err) } return client, nil diff --git a/vault/seal/ocikms/ocikms.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/ocikms.go similarity index 56% rename from vault/seal/ocikms/ocikms.go rename to vendor/github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/ocikms.go index adcf942e8e..b72f7ece71 100644 --- a/vault/seal/ocikms/ocikms.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/ocikms.go @@ -4,6 +4,7 @@ package ocikms import ( "context" "encoding/base64" + "errors" "fmt" "math" "os" @@ -11,11 +12,7 @@ import ( "sync/atomic" "time" - "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/vault/seal" + wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/oracle/oci-go-sdk/common" "github.com/oracle/oci-go-sdk/common/auth" 
"github.com/oracle/oci-go-sdk/keymanagement" @@ -23,11 +20,14 @@ import ( const ( // OCI KMS key ID to use for encryption and decryption - EnvOCIKMSSealKeyID = "VAULT_OCIKMS_SEAL_KEY_ID" + EnvOCIKMSWrapperKeyID = "OCIKMS_WRAPPER_KEY_ID" + EnvVaultOCIKMSSealKeyID = "VAULT_OCIKMS_SEAL_KEY_ID" // OCI KMS crypto endpoint to use for encryption and decryption - EnvOCIKMSCryptoEndpoint = "VAULT_OCIKMS_CRYPTO_ENDPOINT" + EnvOCIKMSWrapperCryptoEndpoint = "OCIKMS_WRAPPER_CRYPTO_ENDPOINT" + EnvVaultOCIKMSSealCryptoEndpoint = "VAULT_OCIKMS_CRYPTO_ENDPOINT" // OCI KMS management endpoint to manage keys - EnvOCIKMSManagementEndpoint = "VAULT_OCIKMS_MANAGEMENT_ENDPOINT" + EnvOCIKMSWrapperManagementEndpoint = "OCIKMS_WRAPPER_MANAGEMENT_ENDPOINT" + EnvVaultOCIKMSSealManagementEndpoint = "VAULT_OCIKMS_MANAGEMENT_ENDPOINT" // Maximum number of retries KMSMaximumNumberOfRetries = 5 // keyID config @@ -40,20 +40,7 @@ const ( KMSConfigAuthTypeAPIKey = "auth_type_api_key" ) -var ( - metricInit = []string{"ocikms", "init"} - metricEncrypt = []string{"ocikms", "encrypt"} - metricDecrypt = []string{"ocikms", "decrypt"} - - metricInitFailed = []string{"ocikms", "initFailed"} - metricEncryptFailed = []string{"ocikms", "encryptFailed"} - metricDecryptFailed = []string{"ocikms", "decryptFailed"} -) - -// OCIKMSMechanism is the method used to encrypt/decrypt in auto unseal process -type OCIKMSMechanism uint32 - -type OCIKMSSeal struct { +type Wrapper struct { authTypeAPIKey bool // true for user principal, false for instance principal, default is false keyID string // OCI KMS keyID @@ -64,63 +51,61 @@ type OCIKMSSeal struct { managementClient *keymanagement.KmsManagementClient // OCI KMS management client currentKeyID *atomic.Value // Current key version which is used for encryption/decryption - - logger log.Logger } -var _ seal.Access = (*OCIKMSSeal)(nil) +var _ wrapping.Wrapper = (*Wrapper)(nil) -// NewSeal creates a new OCIKMSSeal seal with the provided logger -func NewSeal(logger log.Logger) *OCIKMSSeal { - k := &OCIKMSSeal{ - logger: logger, +// NewWrapper creates a new Wrapper seal with the provided logger +func NewWrapper(opts *wrapping.WrapperOptions) *Wrapper { + if opts == nil { + opts = new(wrapping.WrapperOptions) + } + k := &Wrapper{ currentKeyID: new(atomic.Value), } k.currentKeyID.Store("") return k } -func (k *OCIKMSSeal) SetConfig(config map[string]string) (map[string]string, error) { - defer metrics.MeasureSince(metricInit, time.Now()) +func (k *Wrapper) SetConfig(config map[string]string) (map[string]string, error) { if config == nil { config = map[string]string{} } // Check and set KeyID switch { - case os.Getenv(EnvOCIKMSSealKeyID) != "": - k.keyID = os.Getenv(EnvOCIKMSSealKeyID) + case os.Getenv(EnvOCIKMSWrapperKeyID) != "": + k.keyID = os.Getenv(EnvOCIKMSWrapperKeyID) + case os.Getenv(EnvVaultOCIKMSSealKeyID) != "": + k.keyID = os.Getenv(EnvVaultOCIKMSSealKeyID) case config[KMSConfigKeyID] != "": k.keyID = config[KMSConfigKeyID] default: - metrics.IncrCounter(metricInitFailed, 1) return nil, fmt.Errorf("'%s' not found for OCI KMS seal configuration", KMSConfigKeyID) } - k.logger.Info("OCI KMS configuration", KMSConfigKeyID, k.keyID) - // Check and set cryptoEndpoint switch { - case os.Getenv(EnvOCIKMSCryptoEndpoint) != "": - k.cryptoEndpoint = os.Getenv(EnvOCIKMSCryptoEndpoint) + case os.Getenv(EnvOCIKMSWrapperCryptoEndpoint) != "": + k.cryptoEndpoint = os.Getenv(EnvOCIKMSWrapperCryptoEndpoint) + case os.Getenv(EnvVaultOCIKMSSealCryptoEndpoint) != "": + k.cryptoEndpoint = 
os.Getenv(EnvVaultOCIKMSSealCryptoEndpoint) case config[KMSConfigCryptoEndpoint] != "": k.cryptoEndpoint = config[KMSConfigCryptoEndpoint] default: - metrics.IncrCounter(metricInitFailed, 1) return nil, fmt.Errorf("'%s' not found for OCI KMS seal configuration", KMSConfigCryptoEndpoint) } - k.logger.Info("OCI KMS configuration", KMSConfigCryptoEndpoint, k.cryptoEndpoint) // Check and set managementEndpoint switch { - case os.Getenv(EnvOCIKMSManagementEndpoint) != "": - k.managementEndpoint = os.Getenv(EnvOCIKMSManagementEndpoint) + case os.Getenv(EnvOCIKMSWrapperManagementEndpoint) != "": + k.managementEndpoint = os.Getenv(EnvOCIKMSWrapperManagementEndpoint) + case os.Getenv(EnvVaultOCIKMSSealManagementEndpoint) != "": + k.managementEndpoint = os.Getenv(EnvVaultOCIKMSSealManagementEndpoint) case config[KMSConfigManagementEndpoint] != "": k.managementEndpoint = config[KMSConfigManagementEndpoint] default: - metrics.IncrCounter(metricInitFailed, 1) return nil, fmt.Errorf("'%s' not found for OCI KMS seal configuration", KMSConfigManagementEndpoint) } - k.logger.Info("OCI KMS configuration", KMSConfigManagementEndpoint, k.managementEndpoint) // Check and set authTypeAPIKey var err error @@ -129,22 +114,15 @@ func (k *OCIKMSSeal) SetConfig(config map[string]string) (map[string]string, err if authTypeAPIKeyStr != "" { k.authTypeAPIKey, err = strconv.ParseBool(authTypeAPIKeyStr) if err != nil { - metrics.IncrCounter(metricInitFailed, 1) - return nil, errwrap.Wrapf("failed parsing "+KMSConfigAuthTypeAPIKey+" parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing "+KMSConfigAuthTypeAPIKey+" parameter: %w", err) } } - if k.authTypeAPIKey { - k.logger.Info("using OCI KMS with user principal") - } else { - k.logger.Info("using OCI KMS with instance principal") - } // Check and set OCI KMS crypto client if k.cryptoClient == nil { kmsCryptoClient, err := k.getOCIKMSCryptoClient() if err != nil { - metrics.IncrCounter(metricInitFailed, 1) - return nil, errwrap.Wrapf("error initializing OCI KMS client: {{err}}", err) + return nil, fmt.Errorf("error initializing OCI KMS client: %w", err) } k.cryptoClient = kmsCryptoClient } @@ -153,61 +131,63 @@ func (k *OCIKMSSeal) SetConfig(config map[string]string) (map[string]string, err if k.managementClient == nil { kmsManagementClient, err := k.getOCIKMSManagementClient() if err != nil { - metrics.IncrCounter(metricInitFailed, 1) - return nil, errwrap.Wrapf("error initializing OCI KMS client: {{err}}", err) + return nil, fmt.Errorf("error initializing OCI KMS client: %w", err) } k.managementClient = kmsManagementClient } // Calling Encrypt method with empty string just to validate keyId access and store current keyVersion - encryptedBlobInfo, err := k.Encrypt(context.Background(), []byte("")) + encryptedBlobInfo, err := k.Encrypt(context.Background(), []byte(""), nil) if err != nil || encryptedBlobInfo == nil { - metrics.IncrCounter(metricInitFailed, 1) - return nil, errwrap.Wrapf("failed "+KMSConfigKeyID+" validation: {{err}}", err) + return nil, fmt.Errorf("failed "+KMSConfigKeyID+" validation: %w", err) } - k.logger.Info("successfully validated " + KMSConfigKeyID) // Map that holds non-sensitive configuration info - sealInfo := make(map[string]string) - sealInfo[KMSConfigKeyID] = k.keyID - sealInfo[KMSConfigCryptoEndpoint] = k.cryptoEndpoint - sealInfo[KMSConfigManagementEndpoint] = k.managementEndpoint + wrapperInfo := make(map[string]string) + wrapperInfo[KMSConfigKeyID] = k.keyID + wrapperInfo[KMSConfigCryptoEndpoint] = k.cryptoEndpoint + 
wrapperInfo[KMSConfigManagementEndpoint] = k.managementEndpoint + if k.authTypeAPIKey { + wrapperInfo["principal_type"] = "user" + } else { + wrapperInfo["principal_type"] = "instance" + } - return sealInfo, nil + return wrapperInfo, nil } -func (k *OCIKMSSeal) SealType() string { - return seal.OCIKMS +func (k *Wrapper) Type() string { + return wrapping.OCIKMS } -func (k *OCIKMSSeal) KeyID() string { +func (k *Wrapper) KeyID() string { return k.currentKeyID.Load().(string) } -func (k *OCIKMSSeal) Init(context.Context) error { +func (k *Wrapper) HMACKeyID() string { + return "" +} + +func (k *Wrapper) Init(context.Context) error { return nil } -func (k *OCIKMSSeal) Finalize(context.Context) error { +func (k *Wrapper) Finalize(context.Context) error { return nil } -func (k *OCIKMSSeal) Encrypt(ctx context.Context, plaintext []byte) (*physical.EncryptedBlobInfo, error) { - defer metrics.MeasureSince(metricEncrypt, time.Now()) +func (k *Wrapper) Encrypt(ctx context.Context, plaintext, aad []byte) (*wrapping.EncryptedBlobInfo, error) { if plaintext == nil { - metrics.IncrCounter(metricEncryptFailed, 1) - return nil, fmt.Errorf("given plaintext for encryption is nil") + return nil, errors.New("given plaintext for encryption is nil") } - env, err := seal.NewEnvelope().Encrypt(plaintext) + env, err := wrapping.NewEnvelope(nil).Encrypt(plaintext, aad) if err != nil { - metrics.IncrCounter(metricEncryptFailed, 1) - return nil, errwrap.Wrapf("error wrapping data: {{err}}", err) + return nil, fmt.Errorf("error wrapping data: %w", err) } if k.cryptoClient == nil { - metrics.IncrCounter(metricEncryptFailed, 1) - return nil, fmt.Errorf("nil client") + return nil, errors.New("nil client") } // OCI KMS required base64 encrypted plain text before sending to the service @@ -226,36 +206,32 @@ func (k *OCIKMSSeal) Encrypt(ctx context.Context, plaintext []byte) (*physical.E } output, err := k.cryptoClient.Encrypt(ctx, input) if err != nil { - metrics.IncrCounter(metricEncryptFailed, 1) - return nil, errwrap.Wrapf("error encrypting data: {{err}}", err) + return nil, fmt.Errorf("error encrypting data: %w", err) } // Note: It is potential a timing issue if the key gets rotated between this // getCurrentKeyVersion operation and above Encrypt operation keyVersion, err := k.getCurrentKeyVersion() if err != nil { - metrics.IncrCounter(metricEncryptFailed, 1) - return nil, errwrap.Wrapf("error getting current key version: {{err}}", err) + return nil, fmt.Errorf("error getting current key version: %w", err) } // Update key version k.currentKeyID.Store(keyVersion) - ret := &physical.EncryptedBlobInfo{ + ret := &wrapping.EncryptedBlobInfo{ Ciphertext: env.Ciphertext, IV: env.IV, - KeyInfo: &physical.SealKeyInfo{ + KeyInfo: &wrapping.KeyInfo{ // Storing current key version in case we want to re-wrap older entries KeyID: keyVersion, WrappedKey: []byte(*output.Ciphertext), }, } - k.logger.Debug("successfully encrypted") return ret, nil } -func (k *OCIKMSSeal) Decrypt(ctx context.Context, in *physical.EncryptedBlobInfo) ([]byte, error) { - defer metrics.MeasureSince(metricDecrypt, time.Now()) +func (k *Wrapper) Decrypt(ctx context.Context, in *wrapping.EncryptedBlobInfo, aad []byte) ([]byte, error) { if in == nil { return nil, fmt.Errorf("given input for decryption is nil") } @@ -272,29 +248,27 @@ func (k *OCIKMSSeal) Decrypt(ctx context.Context, in *physical.EncryptedBlobInfo } output, err := k.cryptoClient.Decrypt(ctx, input) if err != nil { - metrics.IncrCounter(metricDecryptFailed, 1) - return nil, errwrap.Wrapf("error 
decrypting data: {{err}}", err) + return nil, fmt.Errorf("error decrypting data: %w", err) } - envelopKey, err := base64.StdEncoding.DecodeString(*output.Plaintext) + envelopeKey, err := base64.StdEncoding.DecodeString(*output.Plaintext) if err != nil { - metrics.IncrCounter(metricDecryptFailed, 1) - return nil, errwrap.Wrapf("error base64 decrypting data: {{err}}", err) + return nil, fmt.Errorf("error base64 decrypting data: %w", err) } - envInfo := &seal.EnvelopeInfo{ - Key: envelopKey, + envInfo := &wrapping.EnvelopeInfo{ + Key: envelopeKey, IV: in.IV, Ciphertext: in.Ciphertext, } - plaintext, err := seal.NewEnvelope().Decrypt(envInfo) + plaintext, err := wrapping.NewEnvelope(nil).Decrypt(envInfo, aad) if err != nil { - return nil, errwrap.Wrapf("error decrypting data: {{err}}", err) + return nil, fmt.Errorf("error decrypting data: %w", err) } return plaintext, nil } -func (k *OCIKMSSeal) getConfigProvider() (common.ConfigurationProvider, error) { +func (k *Wrapper) getConfigProvider() (common.ConfigurationProvider, error) { var cp common.ConfigurationProvider var err error if k.authTypeAPIKey { @@ -302,46 +276,46 @@ func (k *OCIKMSSeal) getConfigProvider() (common.ConfigurationProvider, error) { } else { cp, err = auth.InstancePrincipalConfigurationProvider() if err != nil { - return nil, errwrap.Wrapf("failed creating InstancePrincipalConfigurationProvider: {{err}}", err) + return nil, fmt.Errorf("failed creating InstancePrincipalConfigurationProvider: %w", err) } } return cp, nil } // Build OCI KMS crypto client -func (k *OCIKMSSeal) getOCIKMSCryptoClient() (*keymanagement.KmsCryptoClient, error) { +func (k *Wrapper) getOCIKMSCryptoClient() (*keymanagement.KmsCryptoClient, error) { cp, err := k.getConfigProvider() if err != nil { - return nil, errwrap.Wrapf("failed creating configuration provider: {{err}}", err) + return nil, fmt.Errorf("failed creating configuration provider: %w", err) } // Build crypto client kmsCryptoClient, err := keymanagement.NewKmsCryptoClientWithConfigurationProvider(cp, k.cryptoEndpoint) if err != nil { - return nil, errwrap.Wrapf("failed creating NewKmsCryptoClientWithConfigurationProvider: {{err}}", err) + return nil, fmt.Errorf("failed creating NewKmsCryptoClientWithConfigurationProvider: %w", err) } return &kmsCryptoClient, nil } // Build OCI KMS management client -func (k *OCIKMSSeal) getOCIKMSManagementClient() (*keymanagement.KmsManagementClient, error) { +func (k *Wrapper) getOCIKMSManagementClient() (*keymanagement.KmsManagementClient, error) { cp, err := k.getConfigProvider() if err != nil { - return nil, errwrap.Wrapf("failed creating configuration provider: {{err}}", err) + return nil, fmt.Errorf("failed creating configuration provider: %w", err) } // Build crypto client kmsManagementClient, err := keymanagement.NewKmsManagementClientWithConfigurationProvider(cp, k.managementEndpoint) if err != nil { - return nil, errwrap.Wrapf("failed creating NewKmsCryptoClientWithConfigurationProvider: {{err}}", err) + return nil, fmt.Errorf("failed creating NewKmsCryptoClientWithConfigurationProvider: %w", err) } return &kmsManagementClient, nil } // Request metadata includes retry policy -func (k *OCIKMSSeal) getRequestMetadata() common.RequestMetadata { +func (k *Wrapper) getRequestMetadata() common.RequestMetadata { // Only retry for 5xx errors retryOn5xxFunc := func(r common.OCIOperationResponse) bool { return r.Error != nil && r.Response.HTTPResponse().StatusCode >= 500 @@ -364,7 +338,7 @@ func getExponentialBackoffRetryPolicy(n uint, fn func(r 
common.OCIOperationRespo return &policy } -func (k *OCIKMSSeal) getCurrentKeyVersion() (string, error) { +func (k *Wrapper) getCurrentKeyVersion() (string, error) { if k.managementClient == nil { return "", fmt.Errorf("managementClient has not yet initialized") } @@ -375,7 +349,7 @@ func (k *OCIKMSSeal) getCurrentKeyVersion() (string, error) { } getKeyResponse, err := k.managementClient.GetKey(context.Background(), getKeyInput) if err != nil || getKeyResponse.CurrentKeyVersion == nil { - return "", errwrap.Wrapf("failed getting current key version: {{err}}", err) + return "", fmt.Errorf("failed getting current key version: %w", err) } return *getKeyResponse.CurrentKeyVersion, nil diff --git a/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/transit/transit.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/transit/transit.go new file mode 100644 index 0000000000..57cc72e26d --- /dev/null +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/transit/transit.go @@ -0,0 +1,109 @@ +package transit + +import ( + "context" + "errors" + "strings" + "sync/atomic" + + "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping" +) + +// Wrapper is a wrapper that leverages Vault's Transit secret +// engine +type Wrapper struct { + logger hclog.Logger + client transitClientEncryptor + currentKeyID *atomic.Value +} + +var _ wrapping.Wrapper = (*Wrapper)(nil) + +// NewWrapper creates a new transit wrapper +func NewWrapper(opts *wrapping.WrapperOptions) *Wrapper { + if opts == nil { + opts = new(wrapping.WrapperOptions) + } + s := &Wrapper{ + logger: opts.Logger, + currentKeyID: new(atomic.Value), + } + s.currentKeyID.Store("") + return s +} + +// SetConfig processes the config info from the server config +func (s *Wrapper) SetConfig(config map[string]string) (map[string]string, error) { + client, wrapperInfo, err := newTransitClient(s.logger, config) + if err != nil { + return nil, err + } + s.client = client + + // Send a value to test the wrapper and to set the current key id + if _, err := s.Encrypt(context.Background(), []byte("a"), nil); err != nil { + client.Close() + return nil, err + } + + return wrapperInfo, nil +} + +// Init is called during core.Initialize +func (s *Wrapper) Init(_ context.Context) error { + return nil +} + +// Finalize is called during shutdown +func (s *Wrapper) Finalize(_ context.Context) error { + s.client.Close() + return nil +} + +// Type returns the type for this particular Wrapper implementation +func (s *Wrapper) Type() string { + return wrapping.Transit +} + +// KeyID returns the last known key id +func (s *Wrapper) KeyID() string { + return s.currentKeyID.Load().(string) +} + +// HMACKeyID returns the last known HMAC key id +func (s *Wrapper) HMACKeyID() string { + return "" +} + +// Encrypt is used to encrypt using Vault's Transit engine +func (s *Wrapper) Encrypt(_ context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) { + ciphertext, err := s.client.Encrypt(plaintext) + if err != nil { + return nil, err + } + + splitKey := strings.Split(string(ciphertext), ":") + if len(splitKey) != 3 { + return nil, errors.New("invalid ciphertext returned") + } + keyID := splitKey[1] + s.currentKeyID.Store(keyID) + + ret := &wrapping.EncryptedBlobInfo{ + Ciphertext: ciphertext, + KeyInfo: &wrapping.KeyInfo{ + KeyID: keyID, + }, + } + return ret, nil +} + +// Decrypt is used to decrypt the ciphertext +func (s *Wrapper) Decrypt(_ context.Context, in *wrapping.EncryptedBlobInfo, _ []byte) (pt []byte, err error) { 
+ plaintext, err := s.client.Decrypt(in.Ciphertext) + if err != nil { + return nil, err + } + return plaintext, nil +} diff --git a/vault/seal/transit/transit_client.go b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/transit/transit_client.go similarity index 70% rename from vault/seal/transit/transit_client.go rename to vendor/github.com/hashicorp/go-kms-wrapping/wrappers/transit/transit_client.go index 48066ba6a6..0415b1cc92 100644 --- a/vault/seal/transit/transit_client.go +++ b/vendor/github.com/hashicorp/go-kms-wrapping/wrappers/transit/transit_client.go @@ -7,10 +7,21 @@ import ( "path" "strconv" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" ) +const ( + EnvTransitWrapperMountPath = "TRANSIT_WRAPPER_MOUNT_PATH" + EnvVaultTransitSealMountPath = "VAULT_TRANSIT_SEAL_MOUNT_PATH" + + EnvTransitWrapperKeyName = "TRANSIT_WRAPPER_KEY_NAME" + EnvVaultTransitSealKeyName = "VAULT_TRANSIT_SEAL_KEY_NAME" + + EnvTransitWrapperDisableRenewal = "TRANSIT_WRAPPER_DISABLE_RENEWAL" + EnvVaultTransitSealDisableRenewal = "VAULT_TRANSIT_SEAL_DISABLE_RENEWAL" +) + type transitClientEncryptor interface { Close() Encrypt(plaintext []byte) (ciphertext []byte, err error) @@ -18,22 +29,24 @@ type transitClientEncryptor interface { } type transitClient struct { - client *api.Client - renewer *api.Renewer + client *api.Client + lifetimeWatcher *api.Renewer mountPath string keyName string } -func newTransitClient(logger log.Logger, config map[string]string) (*transitClient, map[string]string, error) { +func newTransitClient(logger hclog.Logger, config map[string]string) (*transitClient, map[string]string, error) { if config == nil { config = map[string]string{} } var mountPath, keyName string switch { - case os.Getenv("VAULT_TRANSIT_SEAL_MOUNT_PATH") != "": - mountPath = os.Getenv("VAULT_TRANSIT_SEAL_MOUNT_PATH") + case os.Getenv(EnvTransitWrapperMountPath) != "": + mountPath = os.Getenv(EnvTransitWrapperMountPath) + case os.Getenv(EnvVaultTransitSealMountPath) != "": + mountPath = os.Getenv(EnvVaultTransitSealMountPath) case config["mount_path"] != "": mountPath = config["mount_path"] default: @@ -41,8 +54,10 @@ func newTransitClient(logger log.Logger, config map[string]string) (*transitClie } switch { - case os.Getenv("VAULT_TRANSIT_SEAL_KEY_NAME") != "": - keyName = os.Getenv("VAULT_TRANSIT_SEAL_KEY_NAME") + case os.Getenv(EnvTransitWrapperKeyName) != "": + keyName = os.Getenv(EnvTransitWrapperKeyName) + case os.Getenv(EnvVaultTransitSealKeyName) != "": + keyName = os.Getenv(EnvVaultTransitSealKeyName) case config["key_name"] != "": keyName = config["key_name"] default: @@ -52,8 +67,10 @@ func newTransitClient(logger log.Logger, config map[string]string) (*transitClie var disableRenewal bool var disableRenewalRaw string switch { - case os.Getenv("VAULT_TRANSIT_SEAL_DISABLE_RENEWAL") != "": - disableRenewalRaw = os.Getenv("VAULT_TRANSIT_SEAL_DISABLE_RENEWAL") + case os.Getenv(EnvTransitWrapperDisableRenewal) != "": + disableRenewalRaw = os.Getenv(EnvTransitWrapperDisableRenewal) + case os.Getenv(EnvVaultTransitSealDisableRenewal) != "": + disableRenewalRaw = os.Getenv(EnvVaultTransitSealDisableRenewal) case config["disable_renewal"] != "": disableRenewalRaw = config["disable_renewal"] } @@ -122,34 +139,34 @@ func newTransitClient(logger log.Logger, config map[string]string) (*transitClie } if !disableRenewal && apiClient.Token() != "" { - // Renew the token immediately to get a secret to pass to renewer + // Renew the token immediately to get a 
secret to pass to lifetime watcher secret, err := apiClient.Auth().Token().RenewTokenAsSelf(apiClient.Token(), 0) - // If we don't get an error renewing, set up a renewer. The token may not be renewable or not have + // If we don't get an error renewing, set up a lifetime watcher. The token may not be renewable or not have // permission to renew-self. if err == nil { - renewer, err := apiClient.NewRenewer(&api.RenewerInput{ + lifetimeWatcher, err := apiClient.NewLifetimeWatcher(&api.LifetimeWatcherInput{ Secret: secret, }) if err != nil { return nil, nil, err } - client.renewer = renewer + client.lifetimeWatcher = lifetimeWatcher go func() { for { select { - case err := <-renewer.DoneCh(): + case err := <-lifetimeWatcher.DoneCh(): logger.Info("shutting down token renewal") if err != nil { logger.Error("error renewing token", "error", err) } return - case <-renewer.RenewCh(): + case <-lifetimeWatcher.RenewCh(): logger.Trace("successfully renewed token") } } }() - go renewer.Renew() + go lifetimeWatcher.Start() } else { logger.Info("unable to renew token, disabling renewal", "err", err) } @@ -167,8 +184,8 @@ func newTransitClient(logger log.Logger, config map[string]string) (*transitClie } func (c *transitClient) Close() { - if c.renewer != nil { - c.renewer.Stop() + if c.lifetimeWatcher != nil { + c.lifetimeWatcher.Stop() } } diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.mod b/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.mod index c918abc5a0..cd65cb3f39 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.mod +++ b/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.mod @@ -3,17 +3,17 @@ module github.com/hashicorp/vault-plugin-auth-azure go 1.12 require ( - contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect - github.com/Azure/azure-sdk-for-go v29.0.0+incompatible - github.com/Azure/go-autorest v11.7.1+incompatible + github.com/Azure/azure-sdk-for-go v36.2.0+incompatible + github.com/Azure/go-autorest/autorest v0.9.2 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 + github.com/Azure/go-autorest/autorest/to v0.3.0 + github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect github.com/coreos/go-oidc v2.0.0+incompatible - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/dimchansky/utfbom v1.1.0 // indirect github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.1 - github.com/hashicorp/go-hclog v0.8.0 - github.com/hashicorp/vault/api v1.0.5-0.20190814205728-e9c5cd8aca98 - github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b + github.com/hashicorp/go-hclog v0.9.2 + github.com/hashicorp/vault/api v1.0.5-0.20191119041037-cccda49b3962 + github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044 github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a ) diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.sum b/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.sum index 9a2e25d2c7..2ebe82c766 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.sum +++ b/vendor/github.com/hashicorp/vault-plugin-auth-azure/go.sum @@ -1,24 +1,42 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent 
v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible h1:CYPU39ULbGjQBo3gXIqiWouK0C4F+Pt2Zx5CqGvqknE= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q= -github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5LbakNExNmZIg= github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -29,37 +47,23 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= 
+github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -68,21 +72,24 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 h1:PKbxRbsOP7R3f/TpdqcgXrO69T3yd9nLoR+RMRUxSxA= +github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -90,23 +97,13 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.0.5-0.20190814205728-e9c5cd8aca98 h1:LUVHA+Z7zJ5Y+m5i7K8X1q0FIrn7AISU575IQ3/b/GE= -github.com/hashicorp/vault/api v1.0.5-0.20190814205728-e9c5cd8aca98/go.mod h1:t4IAg1Is4bLUtTq8cGgeUh0I8oDRBXPk2bM1Jvg/nWA= -github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b h1:uC3aN7xIG8gPNm9cbNY05OJ44cYfAv5Rn+QLSBsFq1s= -github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/vault/api v1.0.5-0.20191119041037-cccda49b3962 h1:lAA0cNXvvyj0HdBtx0UeK/TlvAQrdhFwwVCuoc4M3A0= +github.com/hashicorp/vault/api v1.0.5-0.20191119041037-cccda49b3962/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044 h1:bXjbz4PFfOoMUrqe9upVa0SbJ2RqfbLzh4eprst/b40= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -121,58 +118,41 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= @@ -180,7 +160,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -192,11 +171,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= @@ -206,40 +180,22 @@ golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+s golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.mod b/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.mod index ff8f1b6693..ea45fb146f 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.mod +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.mod @@ -3,16 +3,17 @@ module github.com/hashicorp/vault-plugin-secrets-azure go 1.12 require ( - 
contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect - github.com/Azure/azure-sdk-for-go v29.0.0+incompatible - github.com/Azure/go-autorest v11.7.1+incompatible - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/dimchansky/utfbom v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go v36.2.0+incompatible + github.com/Azure/go-autorest/autorest v0.9.2 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 + github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/Azure/go-autorest/autorest/to v0.3.0 + github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 github.com/hashicorp/errwrap v1.0.0 - github.com/hashicorp/go-hclog v0.8.0 + github.com/hashicorp/go-hclog v0.9.2 github.com/hashicorp/go-multierror v1.0.0 - github.com/hashicorp/go-uuid v1.0.1 - github.com/hashicorp/vault/api v1.0.5-0.20190814205728-e9c5cd8aca98 - github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b + github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 + github.com/hashicorp/vault/api v1.0.5-0.20191119041037-cccda49b3962 + github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044 ) diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.sum b/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.sum index ee36fa60b1..ceb0714795 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.sum +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-azure/go.sum @@ -1,24 +1,41 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible h1:CYPU39ULbGjQBo3gXIqiWouK0C4F+Pt2Zx5CqGvqknE= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q= -github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 
h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -27,37 +44,23 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 h1:28FVBuwkwowZMjbA7M0wXsI6t3PYulRTMio3SO+eKCM= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 
h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -66,21 +69,24 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 h1:PKbxRbsOP7R3f/TpdqcgXrO69T3yd9nLoR+RMRUxSxA= +github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -88,21 +94,13 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.0.5-0.20190814205728-e9c5cd8aca98 h1:LUVHA+Z7zJ5Y+m5i7K8X1q0FIrn7AISU575IQ3/b/GE= -github.com/hashicorp/vault/api v1.0.5-0.20190814205728-e9c5cd8aca98/go.mod h1:t4IAg1Is4bLUtTq8cGgeUh0I8oDRBXPk2bM1Jvg/nWA= -github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b h1:uC3aN7xIG8gPNm9cbNY05OJ44cYfAv5Rn+QLSBsFq1s= -github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/vault/api v1.0.5-0.20191119041037-cccda49b3962 h1:lAA0cNXvvyj0HdBtx0UeK/TlvAQrdhFwwVCuoc4M3A0= +github.com/hashicorp/vault/api v1.0.5-0.20191119041037-cccda49b3962/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044 h1:bXjbz4PFfOoMUrqe9upVa0SbJ2RqfbLzh4eprst/b40= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -117,56 +115,38 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pierrec/lz4 v2.0.5+incompatible 
h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 
h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -175,7 +155,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -185,11 +164,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= @@ -199,39 +173,21 @@ golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+s golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/backend.go b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/backend.go index d0dcd9265c..93a23bed52 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/backend.go +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/backend.go @@ -69,6 +69,7 @@ func Backend() *backend { pathsRoleSet(b), []*framework.Path{ pathConfig(b), + pathConfigRotateRoot(b), pathSecretAccessToken(b), pathSecretServiceAccountKey(b), }, diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/iamutil/iam_handle.go b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/iamutil/iam_handle.go index 37faedeea6..63700bef1e 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/iamutil/iam_handle.go +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/iamutil/iam_handle.go @@ -6,7 +6,6 @@ import ( "net/http" "github.com/hashicorp/errwrap" - "google.golang.org/api/gensupport" "google.golang.org/api/googleapi" ) @@ -54,18 +53,11 @@ func (h *IamHandle) doRequest(ctx context.Context, req *http.Request, out interf req.Header.Set("User-Agent", h.userAgent) } - resp, err := gensupport.SendRequest(ctx, h.c, req) - defer googleapi.CloseBody(resp) - - if resp != nil && resp.StatusCode == http.StatusNotModified { - return &googleapi.Error{ - Code: resp.StatusCode, - Header: resp.Header, - } - } + resp, err := h.c.Do(req.WithContext(ctx)) if err != nil { return err } + defer googleapi.CloseBody(resp) if err := googleapi.CheckResponse(resp); err != nil { return err diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/path_config_rotate_root.go b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/path_config_rotate_root.go new file mode 100644 index 0000000000..82e2997b65 --- /dev/null +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/path_config_rotate_root.go @@ -0,0 +1,133 @@ +package gcpsecrets + +import ( + "context" + "encoding/base64" + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-gcp-common/gcputil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "google.golang.org/api/iam/v1" +) + +func pathConfigRotateRoot(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/rotate-root", + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigRotateRootWrite, + }, + }, + + HelpSynopsis: pathConfigRotateRootHelpSyn, + HelpDescription: 
pathConfigRotateRootHelpDesc, + } +} + +func (b *backend) pathConfigRotateRootWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Get the current configuration + cfg, err := getConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if cfg == nil { + return nil, fmt.Errorf("no configuration") + } + if cfg.CredentialsRaw == "" { + return nil, fmt.Errorf("configuration does not have credentials - this " + + "endpoint only works with user-provided JSON credentials explicitly " + + "provided via the config/ endpoint") + } + + // Parse the credential JSON to extract the email (we need it for the API + // call) + creds, err := gcputil.Credentials(cfg.CredentialsRaw) + if err != nil { + return nil, errwrap.Wrapf("credentials are invalid: {{err}}", err) + } + + // Generate a new service account key + iamAdmin, err := b.IAMAdminClient(req.Storage) + if err != nil { + return nil, errwrap.Wrapf("failed to create iam client: {{err}}", err) + } + + saName := "projects/-/serviceAccounts/" + creds.ClientEmail + newKey, err := iamAdmin.Projects.ServiceAccounts.Keys. + Create(saName, &iam.CreateServiceAccountKeyRequest{ + KeyAlgorithm: keyAlgorithmRSA2k, + PrivateKeyType: privateKeyTypeJson, + }). + Context(ctx). + Do() + if err != nil { + return nil, errwrap.Wrapf("failed to create new key: {{err}}", err) + } + + // Base64-decode the private key data (it's the JSON file) + newCredsJSON, err := base64.StdEncoding.DecodeString(newKey.PrivateKeyData) + if err != nil { + return nil, errwrap.Wrapf("failed to decode credentials: {{err}}", err) + } + + // Verify creds are valid + newCreds, err := gcputil.Credentials(string(newCredsJSON)) + if err != nil { + return nil, errwrap.Wrapf("api returned invalid credentials: {{err}}", err) + } + + // Update the configuration + cfg.CredentialsRaw = string(newCredsJSON) + entry, err := logical.StorageEntryJSON("config", cfg) + if err != nil { + return nil, errwrap.Wrapf("failed to generate new configuration: {{err}}", err) + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, errwrap.Wrapf("failed to save new configuration: {{err}}", err) + } + + // Clear caches to pick up the new credentials + b.ClearCaches() + + // Delete the old service account key + oldKeyName := fmt.Sprintf("projects/%s/serviceAccounts/%s/keys/%s", + creds.ProjectId, + creds.ClientEmail, + creds.PrivateKeyId) + if _, err := iamAdmin.Projects.ServiceAccounts.Keys. + Delete(oldKeyName). + Context(ctx). + Do(); err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf( + "failed to delete old service account key (%q) - the new service "+ + "account key (%q) is active, but the old one still exists: {{err}}", + creds.PrivateKeyId, newCreds.PrivateKeyId), err) + } + + // We did it! + return &logical.Response{ + Data: map[string]interface{}{ + "private_key_id": newCreds.PrivateKeyId, + }, + }, nil +} + +const pathConfigRotateRootHelpSyn = ` +Request to rotate the GCP credentials used by Vault +` + +const pathConfigRotateRootHelpDesc = ` +This path attempts to rotate the GCP service account credentials used by Vault +for this mount. It does this by generating a new key for the service account, +replacing the internal value, and then scheduling a deletion of the old service +account key. Note that it does not create a new service account, only a new +version of the service account key. + +This path is only valid if Vault has been configured to use GCP credentials via +the config/ endpoint where "credentials" were specified. 
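For illustration only (not part of the patch): a minimal sketch of how an operator might exercise the new rotate-root endpoint from Go using the vault/api client. The mount path "gcp/" is an assumption; the "private_key_id" response field comes from the handler above.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Address and token are typically taken from VAULT_ADDR / VAULT_TOKEN.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Roughly equivalent to `vault write -f gcp/config/rotate-root`,
	// assuming the GCP secrets engine is mounted at "gcp/" (hypothetical).
	secret, err := client.Logical().Write("gcp/config/rotate-root", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new private_key_id:", secret.Data["private_key_id"])
}
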
Additionally, the +provided service account must have permissions to create and delete service +account keys. +` diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/path_role_set.go b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/path_role_set.go index 99a5742a24..52a59da632 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/path_role_set.go +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/path_role_set.go @@ -347,11 +347,18 @@ func (b *backend) pathRoleSetCreateUpdate(ctx context.Context, req *logical.Requ // Bindings bRaw, newBindings := d.GetOk("bindings") - if len(bRaw.(string)) == 0 { - return logical.ErrorResponse("given empty bindings string"), nil + + if newBindings { + bindings, ok := bRaw.(string) + if !ok { + return logical.ErrorResponse("bindings are not a string"), nil + } + if bindings == "" { + return logical.ErrorResponse("bindings are empty"), nil + } } - if isCreate && newBindings == false { + if isCreate && !newBindings { return logical.ErrorResponse("bindings are required for new role set"), nil } diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/secrets_service_account_key.go b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/secrets_service_account_key.go index bbbb245de9..fb09f7e4c7 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/secrets_service_account_key.go +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-gcp/plugin/secrets_service_account_key.go @@ -3,6 +3,7 @@ package gcpsecrets import ( "context" "fmt" + "time" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/framework" @@ -57,6 +58,10 @@ func pathSecretServiceAccountKey(b *backend) *framework.Path { Description: fmt.Sprintf(`Private key type for service account key - defaults to %s"`, privateKeyTypeJson), Default: privateKeyTypeJson, }, + "ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: "Lifetime of the service account key", + }, }, ExistenceCheck: b.pathRoleSetExistenceCheck, Operations: map[logical.Operation]framework.OperationHandler{ @@ -72,6 +77,7 @@ func (b *backend) pathServiceAccountKey(ctx context.Context, req *logical.Reques rsName := d.Get("roleset").(string) keyType := d.Get("key_type").(string) keyAlg := d.Get("key_algorithm").(string) + ttl := d.Get("ttl").(int) rs, err := getRoleSet(rsName, ctx, req.Storage) if err != nil { @@ -85,7 +91,7 @@ func (b *backend) pathServiceAccountKey(ctx context.Context, req *logical.Reques return logical.ErrorResponse(fmt.Sprintf("role set '%s' cannot generate service account keys (has secret type %s)", rsName, rs.SecretType)), nil } - return b.getSecretKey(ctx, req.Storage, rs, keyType, keyAlg) + return b.getSecretKey(ctx, req.Storage, rs, keyType, keyAlg, ttl) } func (b *backend) secretKeyRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { @@ -167,7 +173,7 @@ func (b *backend) secretKeyRevoke(ctx context.Context, req *logical.Request, d * return nil, nil } -func (b *backend) getSecretKey(ctx context.Context, s logical.Storage, rs *RoleSet, keyType, keyAlgorithm string) (*logical.Response, error) { +func (b *backend) getSecretKey(ctx context.Context, s logical.Storage, rs *RoleSet, keyType, keyAlgorithm string, ttl int) (*logical.Response, error) { cfg, err := getConfig(ctx, s) if err != nil { return nil, errwrap.Wrapf("could not read backend config: {{err}}", err) @@ -207,9 +213,12 @@ func (b *backend) getSecretKey(ctx 
context.Context, s logical.Storage, rs *RoleS } resp := b.Secret(SecretTypeKey).Response(secretD, internalD) - resp.Secret.TTL = cfg.TTL - resp.Secret.MaxTTL = cfg.MaxTTL resp.Secret.Renewable = true + + if ttl > 0 { + resp.Secret.TTL = time.Duration(ttl) * time.Second + } + return resp, nil } diff --git a/vendor/github.com/hashicorp/vault/sdk/framework/backend.go b/vendor/github.com/hashicorp/vault/sdk/framework/backend.go index a92025c7d9..8a9e313b85 100644 --- a/vendor/github.com/hashicorp/vault/sdk/framework/backend.go +++ b/vendor/github.com/hashicorp/vault/sdk/framework/backend.go @@ -15,9 +15,9 @@ import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/entropy" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/license" "github.com/hashicorp/vault/sdk/helper/logging" diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/error.go new file mode 100644 index 0000000000..248ab82e3d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/error.go @@ -0,0 +1,34 @@ +package awsutil + +import ( + "errors" + + awsRequest "github.com/aws/aws-sdk-go/aws/request" + multierror "github.com/hashicorp/go-multierror" +) + +var ErrUpstreamRateLimited = errors.New("upstream rate limited") + +// CheckAWSError will examine an error and convert to a logical error if +// appropriate. If no appropriate error is found, return nil +func CheckAWSError(err error) error { + // IsErrorThrottle will check if the error returned is one that matches + // known request limiting errors: + // https://github.com/aws/aws-sdk-go/blob/488d634b5a699b9118ac2befb5135922b4a77210/aws/request/retryer.go#L35 + if awsRequest.IsErrorThrottle(err) { + return ErrUpstreamRateLimited + } + return nil +} + +// AppendAWSError checks if the given error is a known AWS error we modify, +// and if so then returns a go-multierror, appending the original and the +// AWS error. +// If the error is not an AWS error, or not an error we wish to modify, then +// return the original error. +func AppendAWSError(err error) error { + if awserr := CheckAWSError(err); awserr != nil { + err = multierror.Append(err, awserr) + } + return err +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/generate_credentials.go b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/generate_credentials.go new file mode 100644 index 0000000000..83c134a56e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/generate_credentials.go @@ -0,0 +1,84 @@ +package awsutil + +import ( + "fmt" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" +) + +type CredentialsConfig struct { + // The access key if static credentials are being used + AccessKey string + + // The secret key if static credentials are being used + SecretKey string + + // The session token if it is being used + SessionToken string + + // If specified, the region will be provided to the config of the + // EC2RoleProvider's client. This may be useful if you want to e.g. reuse + // the client elsewhere. 
+ Region string + + // The filename for the shared credentials provider, if being used + Filename string + + // The profile for the shared credentials provider, if being used + Profile string + + // The http.Client to use, or nil for the client to use its default + HTTPClient *http.Client +} + +func (c *CredentialsConfig) GenerateCredentialChain() (*credentials.Credentials, error) { + var providers []credentials.Provider + + switch { + case c.AccessKey != "" && c.SecretKey != "": + // Add the static credential provider + providers = append(providers, &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: c.AccessKey, + SecretAccessKey: c.SecretKey, + SessionToken: c.SessionToken, + }}) + case c.AccessKey == "" && c.SecretKey == "": + // Attempt to get credentials from the IAM instance role below + + default: // Have one or the other but not both and not neither + return nil, fmt.Errorf( + "static AWS client credentials haven't been properly configured (the access key or secret key were provided but not both)") + } + + // Add the environment credential provider + providers = append(providers, &credentials.EnvProvider{}) + + // Add the shared credentials provider + providers = append(providers, &credentials.SharedCredentialsProvider{ + Filename: c.Filename, + Profile: c.Profile, + }) + + // Add the remote provider + def := defaults.Get() + if c.Region != "" { + def.Config.Region = aws.String(c.Region) + } + if c.HTTPClient != nil { + def.Config.HTTPClient = c.HTTPClient + } + + providers = append(providers, defaults.RemoteCredProvider(*def.Config, def.Handlers)) + + // Create the credentials required to access the API. + creds := credentials.NewChainCredentials(providers) + if creds == nil { + return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata") + } + + return creds, nil +} diff --git a/helper/awsutil/region.go b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/region.go similarity index 83% rename from helper/awsutil/region.go rename to vendor/github.com/hashicorp/vault/sdk/helper/awsutil/region.go index a5954bdf63..72ba90ce5e 100644 --- a/helper/awsutil/region.go +++ b/vendor/github.com/hashicorp/vault/sdk/helper/awsutil/region.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" - hclog "github.com/hashicorp/go-hclog" ) // "us-east-1 is used because it's where AWS first provides support for new features, @@ -38,22 +37,21 @@ Our chosen approach is: This approach should be used in future updates to this logic. 
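For illustration only (not part of the patch): a minimal sketch of calling the renamed GetRegion helper. Unlike the old GetOrDefaultRegion, it returns an error instead of silently falling back, so callers decide how to handle failures; the empty argument below simply means "no region configured".

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/sdk/helper/awsutil"
)

func main() {
	// An empty configured region asks the helper to consult the shared AWS
	// config and then EC2 instance metadata before using DefaultRegion.
	region, err := awsutil.GetRegion("")
	if err != nil {
		log.Fatalf("unable to determine AWS region: %v", err)
	}
	fmt.Println("using region:", region)
}
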
*/ -func GetOrDefaultRegion(logger hclog.Logger, configuredRegion string) string { +func GetRegion(configuredRegion string) (string, error) { if configuredRegion != "" { - return configuredRegion + return configuredRegion, nil } sess, err := session.NewSessionWithOptions(session.Options{ SharedConfigState: session.SharedConfigEnable, }) if err != nil { - logger.Warn(fmt.Sprintf("unable to start session, defaulting region to %s", DefaultRegion)) - return DefaultRegion + return "", fmt.Errorf("got error when starting session: %w", err) } region := aws.StringValue(sess.Config.Region) if region != "" { - return region + return region, nil } metadata := ec2metadata.New(sess, &aws.Config{ @@ -64,13 +62,13 @@ func GetOrDefaultRegion(logger hclog.Logger, configuredRegion string) string { }, }) if !metadata.Available() { - return DefaultRegion + return DefaultRegion, nil } region, err = metadata.Region() if err != nil { - logger.Warn("unable to retrieve region from instance metadata, defaulting region to %s", DefaultRegion) - return DefaultRegion + return "", fmt.Errorf("unable to retrieve region from instance metadata: %w", err) } - return region + + return region, nil } diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/entry.go b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go index a662dfdd33..c02744a3e1 100644 --- a/vendor/github.com/hashicorp/vault/sdk/physical/entry.go +++ b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go @@ -1,5 +1,7 @@ package physical +import wrapping "github.com/hashicorp/go-kms-wrapping" + // Entry is used to represent data stored by the physical backend type Entry struct { Key string @@ -13,5 +15,5 @@ type Entry struct { // is used to carry information about whether seal wrapping is *desired* // regardless of whether it's currently available. The struct below stores // needed information when it's actually performed. - SealWrapInfo *EncryptedBlobInfo `json:"seal_wrap_info,omitempty"` + SealWrapInfo *wrapping.EncryptedBlobInfo `json:"seal_wrap_info,omitempty"` } diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/types.pb.go b/vendor/github.com/hashicorp/vault/sdk/physical/types.pb.go deleted file mode 100644 index fc9e04a430..0000000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/types.pb.go +++ /dev/null @@ -1,221 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: sdk/physical/types.proto - -package physical - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type EncryptedBlobInfo struct { - Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` - IV []byte `protobuf:"bytes,2,opt,name=iv,proto3" json:"iv,omitempty"` - HMAC []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"` - Wrapped bool `protobuf:"varint,4,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - KeyInfo *SealKeyInfo `protobuf:"bytes,5,opt,name=key_info,json=keyInfo,proto3" json:"key_info,omitempty"` - // Key is the Key value for the entry that corresponds to - // physical.Entry.Key's value - Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EncryptedBlobInfo) Reset() { *m = EncryptedBlobInfo{} } -func (m *EncryptedBlobInfo) String() string { return proto.CompactTextString(m) } -func (*EncryptedBlobInfo) ProtoMessage() {} -func (*EncryptedBlobInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_21dce1f497d1541e, []int{0} -} - -func (m *EncryptedBlobInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EncryptedBlobInfo.Unmarshal(m, b) -} -func (m *EncryptedBlobInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EncryptedBlobInfo.Marshal(b, m, deterministic) -} -func (m *EncryptedBlobInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_EncryptedBlobInfo.Merge(m, src) -} -func (m *EncryptedBlobInfo) XXX_Size() int { - return xxx_messageInfo_EncryptedBlobInfo.Size(m) -} -func (m *EncryptedBlobInfo) XXX_DiscardUnknown() { - xxx_messageInfo_EncryptedBlobInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_EncryptedBlobInfo proto.InternalMessageInfo - -func (m *EncryptedBlobInfo) GetCiphertext() []byte { - if m != nil { - return m.Ciphertext - } - return nil -} - -func (m *EncryptedBlobInfo) GetIV() []byte { - if m != nil { - return m.IV - } - return nil -} - -func (m *EncryptedBlobInfo) GetHMAC() []byte { - if m != nil { - return m.HMAC - } - return nil -} - -func (m *EncryptedBlobInfo) GetWrapped() bool { - if m != nil { - return m.Wrapped - } - return false -} - -func (m *EncryptedBlobInfo) GetKeyInfo() *SealKeyInfo { - if m != nil { - return m.KeyInfo - } - return nil -} - -func (m *EncryptedBlobInfo) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -// SealKeyInfo contains information regarding the seal used to encrypt the entry. -type SealKeyInfo struct { - // Mechanism is the method used by the seal to encrypt and sign the - // data as defined by the seal. - Mechanism uint64 `protobuf:"varint,1,opt,name=Mechanism,proto3" json:"Mechanism,omitempty"` - HMACMechanism uint64 `protobuf:"varint,2,opt,name=HMACMechanism,proto3" json:"HMACMechanism,omitempty"` - // This is an opaque ID used by the seal to identify the specific - // key to use as defined by the seal. This could be a version, key - // label, or something else. 
- KeyID string `protobuf:"bytes,3,opt,name=KeyID,proto3" json:"KeyID,omitempty"` - HMACKeyID string `protobuf:"bytes,4,opt,name=HMACKeyID,proto3" json:"HMACKeyID,omitempty"` - // These value are used when generating our own data encryption keys - // and encrypting them using the autoseal - WrappedKey []byte `protobuf:"bytes,5,opt,name=WrappedKey,proto3" json:"WrappedKey,omitempty"` - // Mechanism specific flags - Flags uint64 `protobuf:"varint,6,opt,name=Flags,proto3" json:"Flags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SealKeyInfo) Reset() { *m = SealKeyInfo{} } -func (m *SealKeyInfo) String() string { return proto.CompactTextString(m) } -func (*SealKeyInfo) ProtoMessage() {} -func (*SealKeyInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_21dce1f497d1541e, []int{1} -} - -func (m *SealKeyInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SealKeyInfo.Unmarshal(m, b) -} -func (m *SealKeyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SealKeyInfo.Marshal(b, m, deterministic) -} -func (m *SealKeyInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SealKeyInfo.Merge(m, src) -} -func (m *SealKeyInfo) XXX_Size() int { - return xxx_messageInfo_SealKeyInfo.Size(m) -} -func (m *SealKeyInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SealKeyInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SealKeyInfo proto.InternalMessageInfo - -func (m *SealKeyInfo) GetMechanism() uint64 { - if m != nil { - return m.Mechanism - } - return 0 -} - -func (m *SealKeyInfo) GetHMACMechanism() uint64 { - if m != nil { - return m.HMACMechanism - } - return 0 -} - -func (m *SealKeyInfo) GetKeyID() string { - if m != nil { - return m.KeyID - } - return "" -} - -func (m *SealKeyInfo) GetHMACKeyID() string { - if m != nil { - return m.HMACKeyID - } - return "" -} - -func (m *SealKeyInfo) GetWrappedKey() []byte { - if m != nil { - return m.WrappedKey - } - return nil -} - -func (m *SealKeyInfo) GetFlags() uint64 { - if m != nil { - return m.Flags - } - return 0 -} - -func init() { - proto.RegisterType((*EncryptedBlobInfo)(nil), "physical.EncryptedBlobInfo") - proto.RegisterType((*SealKeyInfo)(nil), "physical.SealKeyInfo") -} - -func init() { proto.RegisterFile("sdk/physical/types.proto", fileDescriptor_21dce1f497d1541e) } - -var fileDescriptor_21dce1f497d1541e = []byte{ - // 316 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x5d, 0x4b, 0xfb, 0x30, - 0x14, 0xc6, 0x69, 0xd7, 0xbd, 0x9d, 0xed, 0xff, 0x47, 0x83, 0x42, 0x2e, 0x44, 0xca, 0x10, 0xac, - 0x37, 0xad, 0xe8, 0x27, 0x70, 0xbe, 0xa0, 0x8c, 0xdd, 0xc4, 0x0b, 0xc1, 0x1b, 0xc9, 0xb2, 0x6c, - 0x09, 0xed, 0x9a, 0xd0, 0x66, 0xd3, 0x7c, 0x30, 0xaf, 0xfc, 0x72, 0x92, 0x94, 0xb1, 0x7a, 0x77, - 0xce, 0x2f, 0x0f, 0x0f, 0xcf, 0x93, 0x03, 0xb8, 0x5e, 0xe6, 0x99, 0x16, 0xb6, 0x96, 0x8c, 0x16, - 0x99, 0xb1, 0x9a, 0xd7, 0xa9, 0xae, 0x94, 0x51, 0x68, 0xb0, 0xa7, 0x93, 0xef, 0x00, 0x8e, 0x1f, - 0x4b, 0x56, 0x59, 0x6d, 0xf8, 0x72, 0x5a, 0xa8, 0xc5, 0x4b, 0xb9, 0x52, 0xe8, 0x1c, 0x80, 0x49, - 0x2d, 0x78, 0x65, 0xf8, 0x97, 0xc1, 0x41, 0x1c, 0x24, 0x63, 0xd2, 0x22, 0xe8, 0x3f, 0x84, 0x72, - 0x87, 0x43, 0xcf, 0x43, 0xb9, 0x43, 0x08, 0x22, 0xb1, 0xa1, 0x0c, 0x77, 0x3c, 0xf1, 0x33, 0xc2, - 0xd0, 0xff, 0xac, 0xa8, 0xd6, 0x7c, 0x89, 0xa3, 0x38, 0x48, 0x06, 0x64, 0xbf, 0xa2, 0x6b, 0x18, - 0xe4, 0xdc, 0x7e, 0xc8, 0x72, 0xa5, 0x70, 0x37, 0x0e, 0x92, 0xd1, 0xcd, 0x69, 0xba, 
0x0f, 0x94, - 0xbe, 0x72, 0x5a, 0xcc, 0xb8, 0x75, 0x31, 0x48, 0x3f, 0x6f, 0x06, 0x74, 0x04, 0x9d, 0x9c, 0x5b, - 0xdc, 0x8b, 0x83, 0x64, 0x48, 0xdc, 0x38, 0xf9, 0x09, 0x60, 0xd4, 0x92, 0xa2, 0x33, 0x18, 0xce, - 0x39, 0x13, 0xb4, 0x94, 0xf5, 0xc6, 0x07, 0x8e, 0xc8, 0x01, 0xa0, 0x0b, 0xf8, 0xf7, 0x3c, 0xbf, - 0xbb, 0x3f, 0x28, 0x42, 0xaf, 0xf8, 0x0b, 0xd1, 0x09, 0x74, 0x9d, 0xdd, 0x83, 0xaf, 0x31, 0x24, - 0xcd, 0xe2, 0x9c, 0x9d, 0xac, 0x79, 0x89, 0xfc, 0xcb, 0x01, 0xb8, 0x9f, 0x7a, 0x6b, 0x6a, 0xcd, - 0xb8, 0xf5, 0x6d, 0xc6, 0xa4, 0x45, 0x9c, 0xe7, 0x53, 0x41, 0xd7, 0xb5, 0xcf, 0x1e, 0x91, 0x66, - 0x99, 0x5e, 0xbd, 0x5f, 0xae, 0xa5, 0x11, 0xdb, 0x45, 0xca, 0xd4, 0x26, 0x13, 0xb4, 0x16, 0x92, - 0xa9, 0x4a, 0x67, 0x3b, 0xba, 0x2d, 0x4c, 0xd6, 0x3e, 0xdb, 0xa2, 0xe7, 0x2f, 0x76, 0xfb, 0x1b, - 0x00, 0x00, 0xff, 0xff, 0xc6, 0x34, 0xf8, 0x45, 0xcd, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/types.proto b/vendor/github.com/hashicorp/vault/sdk/physical/types.proto deleted file mode 100644 index 1241382d3b..0000000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/types.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/hashicorp/vault/sdk/physical"; - -package physical; - -message EncryptedBlobInfo { - bytes ciphertext = 1; - bytes iv = 2; - bytes hmac = 3; - bool wrapped = 4; - SealKeyInfo key_info = 5; - - // Key is the Key value for the entry that corresponds to - // physical.Entry.Key's value - string key = 6; -} - -// SealKeyInfo contains information regarding the seal used to encrypt the entry. -message SealKeyInfo { - // Mechanism is the method used by the seal to encrypt and sign the - // data as defined by the seal. - uint64 Mechanism = 1; - uint64 HMACMechanism = 2; - - // This is an opaque ID used by the seal to identify the specific - // key to use as defined by the seal. This could be a version, key - // label, or something else. - string KeyID = 3; - string HMACKeyID = 4; - - // These value are used when generating our own data encryption keys - // and encrypting them using the autoseal - bytes WrappedKey = 5; - - // Mechanism specific flags - uint64 Flags = 6; -} diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod deleted file mode 100644 index 672a0e5811..0000000000 --- a/vendor/github.com/hashicorp/yamux/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/hashicorp/yamux diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go index 18a078c8ad..7abc7c744c 100644 --- a/vendor/github.com/hashicorp/yamux/mux.go +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -3,7 +3,6 @@ package yamux import ( "fmt" "io" - "log" "os" "time" ) @@ -31,13 +30,8 @@ type Config struct { // window size that we allow for a stream. MaxStreamWindowSize uint32 - // LogOutput is used to control the log destination. Either Logger or - // LogOutput can be set, not both. + // LogOutput is used to control the log destination LogOutput io.Writer - - // Logger is used to pass in the logger to be used. Either Logger or - // LogOutput can be set, not both. 
- Logger *log.Logger } // DefaultConfig is used to return a default configuration @@ -63,11 +57,6 @@ func VerifyConfig(config *Config) error { if config.MaxStreamWindowSize < initialStreamWindow { return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) } - if config.LogOutput != nil && config.Logger != nil { - return fmt.Errorf("both Logger and LogOutput may not be set, select one") - } else if config.LogOutput == nil && config.Logger == nil { - return fmt.Errorf("one of Logger or LogOutput must be set, select one") - } return nil } diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index a80ddec35e..32ba02e023 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -86,14 +86,9 @@ type sendReady struct { // newSession is used to construct a new session func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { - logger := config.Logger - if logger == nil { - logger = log.New(config.LogOutput, "", log.LstdFlags) - } - s := &Session{ config: config, - logger: logger, + logger: log.New(config.LogOutput, "", log.LstdFlags), conn: conn, bufRead: bufio.NewReader(conn), pings: make(map[uint32]chan struct{}), diff --git a/vendor/github.com/oracle/oci-go-sdk/common/auth/federation_client.go b/vendor/github.com/oracle/oci-go-sdk/common/auth/federation_client.go index 3f416e9b5d..389bf2b751 100644 --- a/vendor/github.com/oracle/oci-go-sdk/common/auth/federation_client.go +++ b/vendor/github.com/oracle/oci-go-sdk/common/auth/federation_client.go @@ -9,8 +9,10 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" + "errors" "fmt" "github.com/oracle/oci-go-sdk/common" + "io/ioutil" "net/http" "os" "strings" @@ -21,10 +23,113 @@ import ( // federationClient is a client to retrieve the security token for an instance principal necessary to sign a request. // It also provides the private key whose corresponding public key is used to retrieve the security token. type federationClient interface { + ClaimHolder PrivateKey() (*rsa.PrivateKey, error) SecurityToken() (string, error) } +// ClaimHolder is implemented by any token interface that provides access to the security claims embedded in the token. 
+type ClaimHolder interface { + GetClaim(key string) (interface{}, error) +} + +type genericFederationClient struct { + SessionKeySupplier sessionKeySupplier + RefreshSecurityToken func() (securityToken, error) + + securityToken securityToken + mux sync.Mutex +} + +var _ federationClient = &genericFederationClient{} + +func (c *genericFederationClient) PrivateKey() (*rsa.PrivateKey, error) { + c.mux.Lock() + defer c.mux.Unlock() + + if err := c.renewKeyAndSecurityTokenIfNotValid(); err != nil { + return nil, err + } + return c.SessionKeySupplier.PrivateKey(), nil +} + +func (c *genericFederationClient) SecurityToken() (token string, err error) { + c.mux.Lock() + defer c.mux.Unlock() + + if err = c.renewKeyAndSecurityTokenIfNotValid(); err != nil { + return "", err + } + return c.securityToken.String(), nil +} + +func (c *genericFederationClient) renewKeyAndSecurityTokenIfNotValid() (err error) { + if c.securityToken == nil || !c.securityToken.Valid() { + if err = c.renewKeyAndSecurityToken(); err != nil { + return fmt.Errorf("failed to renew security token: %s", err.Error()) + } + } + return nil +} + +func (c *genericFederationClient) renewKeyAndSecurityToken() (err error) { + common.Logf("Renewing keys for file based security token at: %v\n", time.Now().Format("15:04:05.000")) + if err = c.SessionKeySupplier.Refresh(); err != nil { + return fmt.Errorf("failed to refresh session key: %s", err.Error()) + } + + common.Logf("Renewing security token at: %v\n", time.Now().Format("15:04:05.000")) + if c.securityToken, err = c.RefreshSecurityToken(); err != nil { + return fmt.Errorf("failed to refresh security token key: %s", err.Error()) + } + common.Logf("Security token renewed at: %v\n", time.Now().Format("15:04:05.000")) + return nil +} + +func (c *genericFederationClient) GetClaim(key string) (interface{}, error) { + c.mux.Lock() + defer c.mux.Unlock() + + if err := c.renewKeyAndSecurityTokenIfNotValid(); err != nil { + return nil, err + } + return c.securityToken.GetClaim(key) +} + +func newFileBasedFederationClient(securityTokenPath string, supplier sessionKeySupplier) (*genericFederationClient, error) { + return &genericFederationClient{ + SessionKeySupplier: supplier, + RefreshSecurityToken: func() (token securityToken, err error) { + var content []byte + if content, err = ioutil.ReadFile(securityTokenPath); err != nil { + return nil, fmt.Errorf("failed to read security token from :%s. Due to: %s", securityTokenPath, err.Error()) + } + + var newToken securityToken + if newToken, err = newInstancePrincipalToken(string(content)); err != nil { + return nil, fmt.Errorf("failed to read security token from :%s. Due to: %s", securityTokenPath, err.Error()) + } + + return newToken, nil + }, + }, nil +} + +func newStaticFederationClient(sessionToken string, supplier sessionKeySupplier) (*genericFederationClient, error) { + var newToken securityToken + var err error + if newToken, err = newInstancePrincipalToken(string(sessionToken)); err != nil { + return nil, fmt.Errorf("failed to read security token. Due to: %s", err.Error()) + } + + return &genericFederationClient{ + SessionKeySupplier: supplier, + RefreshSecurityToken: func() (token securityToken, err error) { + return newToken, nil + }, + }, nil +} + // x509FederationClient retrieves a security token from Auth service. 
type x509FederationClient struct { tenancyID string @@ -197,6 +302,16 @@ func (c *x509FederationClient) getSecurityToken() (securityToken, error) { return newInstancePrincipalToken(response.Token.Token) } +func (c *x509FederationClient) GetClaim(key string) (interface{}, error) { + c.mux.Lock() + defer c.mux.Unlock() + + if err := c.renewSecurityTokenIfNotValid(); err != nil { + return nil, err + } + return c.securityToken.GetClaim(key) +} + type x509FederationRequest struct { X509FederationDetails `contributesTo:"body"` } @@ -249,6 +364,103 @@ type sessionKeySupplier interface { PublicKeyPemRaw() []byte } +//genericKeySupplier implements sessionKeySupplier and provides an arbitrary refresh mechanism +type genericKeySupplier struct { + RefreshFn func() (*rsa.PrivateKey, []byte, error) + + privateKey *rsa.PrivateKey + publicKeyPemRaw []byte +} + +func (s genericKeySupplier) PrivateKey() *rsa.PrivateKey { + if s.privateKey == nil { + return nil + } + + c := *s.privateKey + return &c +} + +func (s genericKeySupplier) PublicKeyPemRaw() []byte { + if s.publicKeyPemRaw == nil { + return nil + } + + c := make([]byte, len(s.publicKeyPemRaw)) + copy(c, s.publicKeyPemRaw) + return c +} + +func (s *genericKeySupplier) Refresh() (err error) { + privateKey, publicPem, err := s.RefreshFn() + if err != nil { + return err + } + + s.privateKey = privateKey + s.publicKeyPemRaw = publicPem + return nil +} + +// create a sessionKeySupplier that reads keys from file every time it refreshes +func newFileBasedKeySessionSupplier(privateKeyPemPath string, passphrasePath *string) (*genericKeySupplier, error) { + return &genericKeySupplier{ + RefreshFn: func() (*rsa.PrivateKey, []byte, error) { + var err error + var passContent []byte + if passphrasePath != nil { + if passContent, err = ioutil.ReadFile(*passphrasePath); err != nil { + return nil, nil, fmt.Errorf("can not read passphrase from file: %s, due to %s", *passphrasePath, err.Error()) + } + } + + var keyPemContent []byte + if keyPemContent, err = ioutil.ReadFile(privateKeyPemPath); err != nil { + return nil, nil, fmt.Errorf("can not read private privateKey pem from file: %s, due to %s", privateKeyPemPath, err.Error()) + } + + var privateKey *rsa.PrivateKey + if privateKey, err = common.PrivateKeyFromBytesWithPassword(keyPemContent, passContent); err != nil { + return nil, nil, fmt.Errorf("can not create private privateKey from contents of: %s, due to: %s", privateKeyPemPath, err.Error()) + } + + var publicKeyAsnBytes []byte + if publicKeyAsnBytes, err = x509.MarshalPKIXPublicKey(privateKey.Public()); err != nil { + return nil, nil, fmt.Errorf("failed to marshal the public part of the new keypair: %s", err.Error()) + } + publicKeyPemRaw := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: publicKeyAsnBytes, + }) + return privateKey, publicKeyPemRaw, nil + }, + }, nil +} + +func newStaticKeySessionSupplier(privateKeyPemContent, passphrase []byte) (*genericKeySupplier, error) { + var err error + var privateKey *rsa.PrivateKey + + if privateKey, err = common.PrivateKeyFromBytesWithPassword(privateKeyPemContent, passphrase); err != nil { + return nil, fmt.Errorf("can not create private privateKey, due to: %s", err.Error()) + } + + var publicKeyAsnBytes []byte + if publicKeyAsnBytes, err = x509.MarshalPKIXPublicKey(privateKey.Public()); err != nil { + return nil, fmt.Errorf("failed to marshal the public part of the new keypair: %s", err.Error()) + } + publicKeyPemRaw := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: publicKeyAsnBytes, + 
}) + + return &genericKeySupplier{ + RefreshFn: func() (key *rsa.PrivateKey, bytes []byte, err error) { + return privateKey, publicKeyPemRaw, nil + }, + }, nil +} + // inMemorySessionKeySupplier implements sessionKeySupplier to vend an RSA keypair. // Refresh() generates a new RSA keypair with a random source, and keeps it in memory. // @@ -311,6 +523,8 @@ func (s *inMemorySessionKeySupplier) PublicKeyPemRaw() []byte { type securityToken interface { fmt.Stringer Valid() bool + + ClaimHolder } type instancePrincipalToken struct { @@ -333,3 +547,15 @@ func (t *instancePrincipalToken) String() string { func (t *instancePrincipalToken) Valid() bool { return !t.jwtToken.expired() } + +var ( + // ErrNoSuchClaim is returned when a token does not hold the claim sought + ErrNoSuchClaim = errors.New("no such claim") +) + +func (t *instancePrincipalToken) GetClaim(key string) (interface{}, error) { + if value, ok := t.jwtToken.payload[key]; ok { + return value, nil + } + return nil, ErrNoSuchClaim +} diff --git a/vendor/github.com/oracle/oci-go-sdk/common/auth/resouce_principal_key_provider.go b/vendor/github.com/oracle/oci-go-sdk/common/auth/resouce_principal_key_provider.go new file mode 100644 index 0000000000..bdde8acc72 --- /dev/null +++ b/vendor/github.com/oracle/oci-go-sdk/common/auth/resouce_principal_key_provider.go @@ -0,0 +1,183 @@ +package auth + +import ( + "crypto/rsa" + "errors" + "fmt" + "github.com/oracle/oci-go-sdk/common" + "os" + "path" +) + +const ( + //ResourcePrincipalVersion2_2 supported version for resource principals + ResourcePrincipalVersion2_2 = "2.2" + //ResourcePrincipalVersionEnvVar environment var name for version + ResourcePrincipalVersionEnvVar = "OCI_RESOURCE_PRINCIPAL_VERSION" + //ResourcePrincipalRPSTEnvVar environment var name holding the token or a path to the token + ResourcePrincipalRPSTEnvVar = "OCI_RESOURCE_PRINCIPAL_RPST" + //ResourcePrincipalPrivatePEMEnvVar environment var holding a rsa private key in pem format or a path to one + ResourcePrincipalPrivatePEMEnvVar = "OCI_RESOURCE_PRINCIPAL_PRIVATE_PEM" + //ResourcePrincipalPrivatePEMPassphraseEnvVar environment var holding the passphrase to a key or a path to one + ResourcePrincipalPrivatePEMPassphraseEnvVar = "OCI_RESOURCE_PRINCIPAL_PRIVATE_PEM_PASSPHRASE" + //ResourcePrincipalRegionEnvVar environment variable holding a region + ResourcePrincipalRegionEnvVar = "OCI_RESOURCE_PRINCIPAL_REGION" + + // TenancyOCIDClaimKey is the key used to look up the resource tenancy in an RPST + TenancyOCIDClaimKey = "res_tenant" + // CompartmentOCIDClaimKey is the key used to look up the resource compartment in an RPST + CompartmentOCIDClaimKey = "res_compartment" +) + +// ConfigurationProviderWithClaimAccess mixes in a method to access the claims held on the underlying security token +type ConfigurationProviderWithClaimAccess interface { + common.ConfigurationProvider + ClaimHolder +} + +// ResourcePrincipalConfigurationProvider returns a resource principal configuration provider using well known +// environment variables to look up token information. The environment variables can either paths or contain the material value +// of the keys. 
However, in the case of the keys and tokens, paths and values can not be mixed +func ResourcePrincipalConfigurationProvider() (ConfigurationProviderWithClaimAccess, error) { + var version string + var ok bool + if version, ok = os.LookupEnv(ResourcePrincipalVersionEnvVar); !ok { + return nil, fmt.Errorf("can not create resource principal, environment variable: %s, not present", ResourcePrincipalVersionEnvVar) + } + + switch version { + case ResourcePrincipalVersion2_2: + rpst := requireEnv(ResourcePrincipalRPSTEnvVar) + if rpst == nil { + return nil, fmt.Errorf("can not create resource principal, environment variable: %s, not present", ResourcePrincipalRPSTEnvVar) + } + private := requireEnv(ResourcePrincipalPrivatePEMEnvVar) + if private == nil { + return nil, fmt.Errorf("can not create resource principal, environment variable: %s, not present", ResourcePrincipalPrivatePEMEnvVar) + } + passphrase := requireEnv(ResourcePrincipalPrivatePEMPassphraseEnvVar) + region := requireEnv(ResourcePrincipalRegionEnvVar) + if region == nil { + return nil, fmt.Errorf("can not create resource principal, environment variable: %s, not present", ResourcePrincipalRegionEnvVar) + } + return newResourcePrincipalKeyProvider22( + *rpst, *private, passphrase, *region) + default: + return nil, fmt.Errorf("can not create resource principal, environment variable: %s, must be valid", ResourcePrincipalVersionEnvVar) + } +} + +func requireEnv(key string) *string { + if val, ok := os.LookupEnv(key); ok { + return &val + } + return nil +} + +// resourcePrincipalKeyProvider22 is a key provider that reads from the specified environment variables +// the environment variables can host the material keys/passphrases or they can be paths to files that need to be read +type resourcePrincipalKeyProvider struct { + FederationClient federationClient + KeyProviderRegion common.Region +} + +func newResourcePrincipalKeyProvider22(sessionTokenLocation, privatePemLocation string, + passphraseLocation *string, region string) (*resourcePrincipalKeyProvider, error) { + + //Check that both the passphrase and the key are paths + if passphraseLocation != nil && (!isPath(privatePemLocation) && isPath(*passphraseLocation) || + isPath(privatePemLocation) && !isPath(*passphraseLocation)) { + return nil, fmt.Errorf("can not create resource principal: both key and passphrase need to be paths or neither needs to be a path") + } + + var supplier sessionKeySupplier + var err error + + //File based case + if isPath(privatePemLocation) { + supplier, err = newFileBasedKeySessionSupplier(privatePemLocation, passphraseLocation) + if err != nil { + return nil, fmt.Errorf("can not create resource principal, due to: %s ", err.Error()) + } + } else { + //else the content is in the env vars + var passphrase []byte + if passphraseLocation != nil { + passphrase = []byte(*passphraseLocation) + } + supplier, err = newStaticKeySessionSupplier([]byte(privatePemLocation), passphrase) + if err != nil { + return nil, fmt.Errorf("can not create resource principal, due to: %s ", err.Error()) + } + } + + var fd federationClient + if isPath(sessionTokenLocation) { + fd, _ = newFileBasedFederationClient(sessionTokenLocation, supplier) + } else { + fd, err = newStaticFederationClient(sessionTokenLocation, supplier) + if err != nil { + return nil, fmt.Errorf("can not create resource principal, due to: %s ", err.Error()) + } + } + + rs := resourcePrincipalKeyProvider{ + FederationClient: fd, + KeyProviderRegion: common.StringToRegion(region), + } + return &rs, nil +} + +func 
(p *resourcePrincipalKeyProvider) PrivateRSAKey() (privateKey *rsa.PrivateKey, err error) { + if privateKey, err = p.FederationClient.PrivateKey(); err != nil { + err = fmt.Errorf("failed to get private key: %s", err.Error()) + return nil, err + } + return privateKey, nil +} + +func (p *resourcePrincipalKeyProvider) KeyID() (string, error) { + var securityToken string + var err error + if securityToken, err = p.FederationClient.SecurityToken(); err != nil { + return "", fmt.Errorf("failed to get security token: %s", err.Error()) + } + return fmt.Sprintf("ST$%s", securityToken), nil +} + +func (p *resourcePrincipalKeyProvider) Region() (string, error) { + return string(p.KeyProviderRegion), nil +} + +var ( + // ErrNonStringClaim is returned if the token has a claim for a key, but it's not a string value + ErrNonStringClaim = errors.New("claim does not have a string value") +) + +func (p *resourcePrincipalKeyProvider) TenancyOCID() (string, error) { + if claim, err := p.GetClaim(TenancyOCIDClaimKey); err != nil { + return "", err + } else if tenancy, ok := claim.(string); ok { + return tenancy, nil + } else { + return "", ErrNonStringClaim + } +} + +func (p *resourcePrincipalKeyProvider) GetClaim(claim string) (interface{}, error) { + return p.FederationClient.GetClaim(claim) +} + +func (p *resourcePrincipalKeyProvider) KeyFingerprint() (string, error) { + return "", nil +} + +func (p *resourcePrincipalKeyProvider) UserOCID() (string, error) { + return "", nil +} + +// By contract for the the content of a resource principal to be considered path, it needs to be +// an absolute path. +func isPath(str string) bool { + return path.IsAbs(str) +} diff --git a/vendor/github.com/oracle/oci-go-sdk/common/auth/utils.go b/vendor/github.com/oracle/oci-go-sdk/common/auth/utils.go index 2845f1b04e..2e1d455c51 100644 --- a/vendor/github.com/oracle/oci-go-sdk/common/auth/utils.go +++ b/vendor/github.com/oracle/oci-go-sdk/common/auth/utils.go @@ -19,6 +19,8 @@ func httpGet(dispatcher common.HTTPRequestDispatcher, url string) (body bytes.Bu var response *http.Response request, err := http.NewRequest(http.MethodGet, url, nil) + request.Header.Add("Authorization", "Bearer Oracle") + if response, err = dispatcher.Do(request); err != nil { return } diff --git a/vendor/github.com/oracle/oci-go-sdk/common/common.go b/vendor/github.com/oracle/oci-go-sdk/common/common.go index 0c18d8c6e3..589d610e73 100644 --- a/vendor/github.com/oracle/oci-go-sdk/common/common.go +++ b/vendor/github.com/oracle/oci-go-sdk/common/common.go @@ -32,7 +32,10 @@ const ( RegionAPMumbai1 Region = "ap-mumbai-1" //RegionEUZurich1 region for Zurich RegionEUZurich1 Region = "eu-zurich-1" - + //RegionSASaopaulo1 region for Sao Paulo + RegionSASaopaulo1 Region = "sa-saopaulo-1" + //RegionAPSydney1 region for Sydney + RegionAPSydney1 Region = "ap-sydney-1" //RegionUSLangley1 region for langley RegionUSLangley1 Region = "us-langley-1" //RegionUSLuke1 region for luke @@ -53,15 +56,17 @@ var realm = map[string]string{ } var regionRealm = map[Region]string{ - RegionPHX: "oc1", - RegionIAD: "oc1", - RegionFRA: "oc1", - RegionLHR: "oc1", - RegionCAToronto1: "oc1", - RegionAPTokyo1: "oc1", - RegionAPSeoul1: "oc1", - RegionAPMumbai1: "oc1", - RegionEUZurich1: "oc1", + RegionPHX: "oc1", + RegionIAD: "oc1", + RegionFRA: "oc1", + RegionLHR: "oc1", + RegionCAToronto1: "oc1", + RegionAPTokyo1: "oc1", + RegionAPSeoul1: "oc1", + RegionAPSydney1: "oc1", + RegionAPMumbai1: "oc1", + RegionEUZurich1: "oc1", + RegionSASaopaulo1: "oc1", RegionUSLangley1: "oc2", 
RegionUSLuke1: "oc2", @@ -109,7 +114,7 @@ func StringToRegion(stringRegion string) (r Region) { switch strings.ToLower(stringRegion) { case "sea": r = RegionSEA - case "ca-toronto-1": + case "yyz", "ca-toronto-1": r = RegionCAToronto1 case "phx", "us-phoenix-1": r = RegionPHX @@ -119,14 +124,18 @@ func StringToRegion(stringRegion string) (r Region) { r = RegionFRA case "lhr", "uk-london-1": r = RegionLHR - case "ap-tokyo-1": + case "nrt", "ap-tokyo-1": r = RegionAPTokyo1 - case "ap-seoul-1": + case "icn", "ap-seoul-1": r = RegionAPSeoul1 - case "ap-mumbai-1": + case "bom", "ap-mumbai-1": r = RegionAPMumbai1 - case "eu-zurich-1": + case "zrh", "eu-zurich-1": r = RegionEUZurich1 + case "gru", "sa-saopaulo-1": + r = RegionSASaopaulo1 + case "syd", "ap-sydney-1": + r = RegionAPSydney1 case "us-langley-1": r = RegionUSLangley1 case "us-luke-1": diff --git a/vendor/github.com/oracle/oci-go-sdk/common/helpers.go b/vendor/github.com/oracle/oci-go-sdk/common/helpers.go index de13854eba..94f20208f5 100644 --- a/vendor/github.com/oracle/oci-go-sdk/common/helpers.go +++ b/vendor/github.com/oracle/oci-go-sdk/common/helpers.go @@ -204,16 +204,27 @@ func (t *SDKDate) MarshalJSON() (buff []byte, e error) { } // PrivateKeyFromBytes is a helper function that will produce a RSA private -// key from bytes. +// key from bytes. This function is deprecated in favour of PrivateKeyFromBytesWithPassword +// Deprecated func PrivateKeyFromBytes(pemData []byte, password *string) (key *rsa.PrivateKey, e error) { + if password == nil { + return PrivateKeyFromBytesWithPassword(pemData, nil) + } + + return PrivateKeyFromBytesWithPassword(pemData, []byte(*password)) +} + +// PrivateKeyFromBytesWithPassword is a helper function that will produce a RSA private +// key from bytes and a password. 
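The hunk below deprecates the pointer-based PrivateKeyFromBytes in favour of the new []byte-based PrivateKeyFromBytesWithPassword (its definition follows). A brief usage sketch of the new helper; the file path and passphrase here are hypothetical:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/oracle/oci-go-sdk/common"
)

func main() {
	// Hypothetical key location; substitute your own key material.
	pemData, err := ioutil.ReadFile("/etc/example/api_key.pem")
	if err != nil {
		log.Fatal(err)
	}
	passphrase := []byte("example-passphrase") // nil for an unencrypted key

	// New []byte-based helper added in this diff; the old
	// PrivateKeyFromBytes(pemData, *string) now delegates to it.
	key, err := common.PrivateKeyFromBytesWithPassword(pemData, passphrase)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("modulus bits:", key.N.BitLen())
}
```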
+func PrivateKeyFromBytesWithPassword(pemData, password []byte) (key *rsa.PrivateKey, e error) { if pemBlock, _ := pem.Decode(pemData); pemBlock != nil { decrypted := pemBlock.Bytes if x509.IsEncryptedPEMBlock(pemBlock) { if password == nil { - e = fmt.Errorf("private_key_password is required for encrypted private keys") + e = fmt.Errorf("private key password is required for encrypted private keys") return } - if decrypted, e = x509.DecryptPEMBlock(pemBlock, []byte(*password)); e != nil { + if decrypted, e = x509.DecryptPEMBlock(pemBlock, password); e != nil { return } } diff --git a/vendor/github.com/oracle/oci-go-sdk/common/http.go b/vendor/github.com/oracle/oci-go-sdk/common/http.go index 4087ce180e..b32232abfb 100644 --- a/vendor/github.com/oracle/oci-go-sdk/common/http.go +++ b/vendor/github.com/oracle/oci-go-sdk/common/http.go @@ -57,9 +57,9 @@ func toStringValue(v reflect.Value, field reflect.StructField) (string, error) { case reflect.String: return v.String(), nil case reflect.Float32: - return strconv.FormatFloat(v.Float(), 'f', 6, 32), nil + return strconv.FormatFloat(v.Float(), 'f', -1, 32), nil case reflect.Float64: - return strconv.FormatFloat(v.Float(), 'f', 6, 64), nil + return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil default: return "", fmt.Errorf("marshaling structure to a http.Request does not support field named: %s of type: %v", field.Name, v.Type().String()) diff --git a/vendor/github.com/oracle/oci-go-sdk/common/version.go b/vendor/github.com/oracle/oci-go-sdk/common/version.go index b6dfb952f8..79ccb25ce1 100644 --- a/vendor/github.com/oracle/oci-go-sdk/common/version.go +++ b/vendor/github.com/oracle/oci-go-sdk/common/version.go @@ -10,8 +10,8 @@ import ( ) const ( - major = "7" - minor = "0" + major = "12" + minor = "5" patch = "0" tag = "" ) diff --git a/vendor/github.com/oracle/oci-go-sdk/keymanagement/create_vault_details.go b/vendor/github.com/oracle/oci-go-sdk/keymanagement/create_vault_details.go index f9e04c17b5..c6fa31d2df 100644 --- a/vendor/github.com/oracle/oci-go-sdk/keymanagement/create_vault_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/keymanagement/create_vault_details.go @@ -44,11 +44,11 @@ type CreateVaultDetailsVaultTypeEnum string // Set of constants representing the allowable values for CreateVaultDetailsVaultTypeEnum const ( - CreateVaultDetailsVaultTypePrivate CreateVaultDetailsVaultTypeEnum = "VIRTUAL_PRIVATE" + CreateVaultDetailsVaultTypeVirtualPrivate CreateVaultDetailsVaultTypeEnum = "VIRTUAL_PRIVATE" ) var mappingCreateVaultDetailsVaultType = map[string]CreateVaultDetailsVaultTypeEnum{ - "VIRTUAL_PRIVATE": CreateVaultDetailsVaultTypePrivate, + "VIRTUAL_PRIVATE": CreateVaultDetailsVaultTypeVirtualPrivate, } // GetCreateVaultDetailsVaultTypeEnumValues Enumerates the set of values for CreateVaultDetailsVaultTypeEnum diff --git a/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault.go b/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault.go index ea120176a2..6a19aa76d0 100644 --- a/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault.go +++ b/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault.go @@ -100,11 +100,11 @@ type VaultVaultTypeEnum string // Set of constants representing the allowable values for VaultVaultTypeEnum const ( - VaultVaultTypePrivate VaultVaultTypeEnum = "VIRTUAL_PRIVATE" + VaultVaultTypeVirtualPrivate VaultVaultTypeEnum = "VIRTUAL_PRIVATE" ) var mappingVaultVaultType = map[string]VaultVaultTypeEnum{ - "VIRTUAL_PRIVATE": VaultVaultTypePrivate, + "VIRTUAL_PRIVATE": 
VaultVaultTypeVirtualPrivate, } // GetVaultVaultTypeEnumValues Enumerates the set of values for VaultVaultTypeEnum diff --git a/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault_summary.go b/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault_summary.go index 1b4fcc10b1..4679ddadcf 100644 --- a/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault_summary.go +++ b/vendor/github.com/oracle/oci-go-sdk/keymanagement/vault_summary.go @@ -96,11 +96,11 @@ type VaultSummaryVaultTypeEnum string // Set of constants representing the allowable values for VaultSummaryVaultTypeEnum const ( - VaultSummaryVaultTypePrivate VaultSummaryVaultTypeEnum = "VIRTUAL_PRIVATE" + VaultSummaryVaultTypeVirtualPrivate VaultSummaryVaultTypeEnum = "VIRTUAL_PRIVATE" ) var mappingVaultSummaryVaultType = map[string]VaultSummaryVaultTypeEnum{ - "VIRTUAL_PRIVATE": VaultSummaryVaultTypePrivate, + "VIRTUAL_PRIVATE": VaultSummaryVaultTypeVirtualPrivate, } // GetVaultSummaryVaultTypeEnumValues Enumerates the set of values for VaultSummaryVaultTypeEnum diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/bucket.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/bucket.go index 2131dfa6c5..482914963c 100644 --- a/vendor/github.com/oracle/oci-go-sdk/objectstorage/bucket.go +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/bucket.go @@ -55,6 +55,10 @@ type Bucket struct { // The 'storageTier' property is immutable after bucket is created. StorageTier BucketStorageTierEnum `mandatory:"false" json:"storageTier,omitempty"` + // A property that determines whether events will be generated for operations on objects in this bucket. + // This is false by default. + ObjectEventsEnabled *bool `mandatory:"false" json:"objectEventsEnabled"` + // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Department": "Finance"}` diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/create_bucket_details.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/create_bucket_details.go index 455883b367..a2202c9daf 100644 --- a/vendor/github.com/oracle/oci-go-sdk/objectstorage/create_bucket_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/create_bucket_details.go @@ -41,6 +41,10 @@ type CreateBucketDetails struct { // property is immutable after bucket is created. StorageTier CreateBucketDetailsStorageTierEnum `mandatory:"false" json:"storageTier,omitempty"` + // A property that determines whether events will be generated for operations on objects in this bucket. + // This is false by default. + ObjectEventsEnabled *bool `mandatory:"false" json:"objectEventsEnabled"` + // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). 
// Example: `{"Department": "Finance"}` diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/objectstorage_client.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/objectstorage_client.go index c23018f9cb..774e161a9f 100644 --- a/vendor/github.com/oracle/oci-go-sdk/objectstorage/objectstorage_client.go +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/objectstorage_client.go @@ -250,7 +250,7 @@ func (client ObjectStorageClient) CreateBucket(ctx context.Context, request Crea // createBucket implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) createBucket(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodPost, "/n/{namespaceName}/b/") + httpRequest, err := request.HTTPRequest(http.MethodPost, "/n/{namespaceName}/b") if err != nil { return nil, err } @@ -334,7 +334,7 @@ func (client ObjectStorageClient) CreatePreauthenticatedRequest(ctx context.Cont // createPreauthenticatedRequest implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) createPreauthenticatedRequest(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodPost, "/n/{namespaceName}/b/{bucketName}/p/") + httpRequest, err := request.HTTPRequest(http.MethodPost, "/n/{namespaceName}/b/{bucketName}/p") if err != nil { return nil, err } @@ -353,8 +353,9 @@ func (client ObjectStorageClient) createPreauthenticatedRequest(ctx context.Cont } // DeleteBucket Deletes a bucket if the bucket is already empty. If the bucket is not empty, use -// DeleteObject first. You also cannot -// delete a bucket that has a pre-authenticated request associated with that bucket. +// DeleteObject first. In addition, +// you cannot delete a bucket that has a multipart upload in progress or a pre-authenticated +// request associated with that bucket. 
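The Bucket, CreateBucketDetails, and UpdateBucketDetails types gain an ObjectEventsEnabled flag in this diff. A hedged sketch of creating a bucket with object events enabled; the client constructor, the common.String/common.Bool helpers, and the Name/CompartmentId fields are assumed from the wider SDK rather than shown in these hunks:

```go
package main

import (
	"context"
	"log"

	"github.com/oracle/oci-go-sdk/common"
	"github.com/oracle/oci-go-sdk/objectstorage"
)

func main() {
	// Constructor and config provider assumed from the rest of the SDK.
	client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
	if err != nil {
		log.Fatal(err)
	}

	req := objectstorage.CreateBucketRequest{
		NamespaceName: common.String("example-namespace"),
		CreateBucketDetails: objectstorage.CreateBucketDetails{
			Name:                common.String("example-bucket"),
			CompartmentId:       common.String("ocid1.compartment.oc1..example"),
			ObjectEventsEnabled: common.Bool(true), // new field added in this diff
		},
	}
	if _, err := client.CreateBucket(context.Background(), req); err != nil {
		log.Fatal(err)
	}
}
```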
func (client ObjectStorageClient) DeleteBucket(ctx context.Context, request DeleteBucketRequest) (response DeleteBucketResponse, err error) { var ociResponse common.OCIResponse policy := common.NoRetryPolicy() @@ -378,7 +379,7 @@ func (client ObjectStorageClient) DeleteBucket(ctx context.Context, request Dele // deleteBucket implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) deleteBucket(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodDelete, "/n/{namespaceName}/b/{bucketName}/") + httpRequest, err := request.HTTPRequest(http.MethodDelete, "/n/{namespaceName}/b/{bucketName}") if err != nil { return nil, err } @@ -546,7 +547,7 @@ func (client ObjectStorageClient) GetBucket(ctx context.Context, request GetBuck // getBucket implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) getBucket(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodGet, "/n/{namespaceName}/b/{bucketName}/") + httpRequest, err := request.HTTPRequest(http.MethodGet, "/n/{namespaceName}/b/{bucketName}") if err != nil { return nil, err } @@ -593,7 +594,7 @@ func (client ObjectStorageClient) GetNamespace(ctx context.Context, request GetN // getNamespace implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) getNamespace(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodGet, "/n/") + httpRequest, err := request.HTTPRequest(http.MethodGet, "/n") if err != nil { return nil, err } @@ -613,7 +614,7 @@ func (client ObjectStorageClient) getNamespace(ctx context.Context, request comm // GetNamespaceMetadata Gets the metadata for the Object Storage namespace, which contains defaultS3CompartmentId and // defaultSwiftCompartmentId. -// Any user with the NAMESPACE_READ permission will be able to see the current metadata. If you are +// Any user with the OBJECTSTORAGE_NAMESPACE_READ permission will be able to see the current metadata. If you are // not authorized, talk to an administrator. If you are an administrator who needs to write policies // to give users access, see // Getting Started with Policies (https://docs.cloud.oracle.com/Content/Identity/Concepts/policygetstarted.htm). 
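Most of the operations touched above take a namespaceName path parameter, which can be discovered with GetNamespace. A hedged sketch, assuming the GetNamespaceRequest/GetNamespaceResponse wrappers and client constructor from the wider SDK (this diff only trims the trailing slash on the underlying request path):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/oracle/oci-go-sdk/common"
	"github.com/oracle/oci-go-sdk/objectstorage"
)

func main() {
	// Request/response types and the constructor are assumed from the SDK,
	// not shown in this diff.
	client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.GetNamespace(context.Background(), objectstorage.GetNamespaceRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("object storage namespace:", *resp.Value)
}
```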
@@ -849,7 +850,7 @@ func (client ObjectStorageClient) HeadBucket(ctx context.Context, request HeadBu // headBucket implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) headBucket(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodHead, "/n/{namespaceName}/b/{bucketName}/") + httpRequest, err := request.HTTPRequest(http.MethodHead, "/n/{namespaceName}/b/{bucketName}") if err != nil { return nil, err } @@ -937,7 +938,7 @@ func (client ObjectStorageClient) ListBuckets(ctx context.Context, request ListB // listBuckets implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) listBuckets(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodGet, "/n/{namespaceName}/b/") + httpRequest, err := request.HTTPRequest(http.MethodGet, "/n/{namespaceName}/b") if err != nil { return nil, err } @@ -1108,7 +1109,7 @@ func (client ObjectStorageClient) ListPreauthenticatedRequests(ctx context.Conte // listPreauthenticatedRequests implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) listPreauthenticatedRequests(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodGet, "/n/{namespaceName}/b/{bucketName}/p/") + httpRequest, err := request.HTTPRequest(http.MethodGet, "/n/{namespaceName}/b/{bucketName}/p") if err != nil { return nil, err } @@ -1347,6 +1348,55 @@ func (client ObjectStorageClient) putObjectLifecyclePolicy(ctx context.Context, return response, err } +// ReencryptBucket Reencrypts the data encryption key of the bucket and objects in the bucket. This is an asynchronous call: the +// system starts a work request task to reencrypt the data encryption key of the objects and chunks in the bucket. +// Only objects created before the time of the API call will be reencrypted. The call can take a long time, depending +// on how many objects are in the bucket and how big they are. This API returns a work request ID, so the user +// can use this ID to retrieve the status of the work request task. +// A user can update the kmsKeyId of the bucket and then call this API, so the data encryption key of the bucket and +// objects in the bucket will be reencrypted with the new kmsKeyId. Note that the system does not track which +// kmsKeyId was used to encrypt each object; the user has to maintain the mapping if they need it.
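ReencryptBucket is a new asynchronous operation; its implementation and the ReencryptBucketRequest/ReencryptBucketResponse wrappers follow later in this diff. A hedged sketch of invoking it and capturing the work request ID for polling; the client constructor and config provider are assumed from the wider SDK:

```go
package main

import (
	"context"
	"log"

	"github.com/oracle/oci-go-sdk/common"
	"github.com/oracle/oci-go-sdk/objectstorage"
)

func main() {
	// Constructor assumed from the wider SDK; the request and response
	// wrappers are the ones introduced by this diff.
	client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
	if err != nil {
		log.Fatal(err)
	}

	resp, err := client.ReencryptBucket(context.Background(), objectstorage.ReencryptBucketRequest{
		NamespaceName: common.String("example-namespace"),
		BucketName:    common.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The operation is asynchronous: poll the returned work request to track progress.
	log.Printf("reencrypt started, work request: %s", *resp.OpcWorkRequestId)
}
```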
+func (client ObjectStorageClient) ReencryptBucket(ctx context.Context, request ReencryptBucketRequest) (response ReencryptBucketResponse, err error) { + var ociResponse common.OCIResponse + policy := common.NoRetryPolicy() + if request.RetryPolicy() != nil { + policy = *request.RetryPolicy() + } + ociResponse, err = common.Retry(ctx, request, client.reencryptBucket, policy) + if err != nil { + if ociResponse != nil { + response = ReencryptBucketResponse{RawResponse: ociResponse.HTTPResponse()} + } + return + } + if convertedResponse, ok := ociResponse.(ReencryptBucketResponse); ok { + response = convertedResponse + } else { + err = fmt.Errorf("failed to convert OCIResponse into ReencryptBucketResponse") + } + return +} + +// reencryptBucket implements the OCIOperation interface (enables retrying operations) +func (client ObjectStorageClient) reencryptBucket(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { + httpRequest, err := request.HTTPRequest(http.MethodPost, "/n/{namespaceName}/b/{bucketName}/actions/reencrypt") + if err != nil { + return nil, err + } + + var response ReencryptBucketResponse + var httpResponse *http.Response + httpResponse, err = client.Call(ctx, &httpRequest) + defer common.CloseBodyIfValid(httpResponse) + response.RawResponse = httpResponse + if err != nil { + return response, err + } + + err = common.UnmarshalResponse(httpResponse, &response) + return response, err +} + // RenameObject Rename an object in the given Object Storage namespace. func (client ObjectStorageClient) RenameObject(ctx context.Context, request RenameObjectRequest) (response RenameObjectResponse, err error) { var ociResponse common.OCIResponse @@ -1433,6 +1483,9 @@ func (client ObjectStorageClient) restoreObjects(ctx context.Context, request co } // UpdateBucket Performs a partial or full update of a bucket's user-defined metadata. +// Use UpdateBucket to move a bucket from one compartment to another within the same tenancy. Supply the compartmentID +// of the compartment that you want to move the bucket to. For more information about moving resources between compartments, +// see Moving Resources to a Different Compartment (https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes). func (client ObjectStorageClient) UpdateBucket(ctx context.Context, request UpdateBucketRequest) (response UpdateBucketResponse, err error) { var ociResponse common.OCIResponse policy := common.NoRetryPolicy() @@ -1456,7 +1509,7 @@ func (client ObjectStorageClient) UpdateBucket(ctx context.Context, request Upda // updateBucket implements the OCIOperation interface (enables retrying operations) func (client ObjectStorageClient) updateBucket(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) { - httpRequest, err := request.HTTPRequest(http.MethodPost, "/n/{namespaceName}/b/{bucketName}/") + httpRequest, err := request.HTTPRequest(http.MethodPost, "/n/{namespaceName}/b/{bucketName}") if err != nil { return nil, err } @@ -1478,7 +1531,7 @@ func (client ObjectStorageClient) updateBucket(ctx context.Context, request comm // compartment of the Oracle Cloud Infrastructure tenancy. // You can change the default Swift/Amazon S3 compartmentId designation to a different compartmentId. All // subsequent bucket creations will use the new default compartment, but no previously created -// buckets will be modified. A user must have NAMESPACE_UPDATE permission to make changes to the default +// buckets will be modified. 
A user must have OBJECTSTORAGE_NAMESPACE_UPDATE permission to make changes to the default // compartments for Amazon S3 and Swift. func (client ObjectStorageClient) UpdateNamespaceMetadata(ctx context.Context, request UpdateNamespaceMetadataRequest) (response UpdateNamespaceMetadataResponse, err error) { var ociResponse common.OCIResponse diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/reencrypt_bucket_request_response.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/reencrypt_bucket_request_response.go new file mode 100644 index 0000000000..d7be1dc4b9 --- /dev/null +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/reencrypt_bucket_request_response.go @@ -0,0 +1,68 @@ +// Copyright (c) 2016, 2018, 2019, Oracle and/or its affiliates. All rights reserved. +// Code generated. DO NOT EDIT. + +package objectstorage + +import ( + "github.com/oracle/oci-go-sdk/common" + "net/http" +) + +// ReencryptBucketRequest wrapper for the ReencryptBucket operation +type ReencryptBucketRequest struct { + + // The Object Storage namespace used for the request. + NamespaceName *string `mandatory:"true" contributesTo:"path" name:"namespaceName"` + + // The name of the bucket. Avoid entering confidential information. + // Example: `my-new-bucket1` + BucketName *string `mandatory:"true" contributesTo:"path" name:"bucketName"` + + // The client request ID for tracing. + OpcClientRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-client-request-id"` + + // Metadata about the request. This information will not be transmitted to the service, but + // represents information that the SDK will consume to drive retry behavior. + RequestMetadata common.RequestMetadata +} + +func (request ReencryptBucketRequest) String() string { + return common.PointerString(request) +} + +// HTTPRequest implements the OCIRequest interface +func (request ReencryptBucketRequest) HTTPRequest(method, path string) (http.Request, error) { + return common.MakeDefaultHTTPRequestWithTaggedStruct(method, path, request) +} + +// RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy. +func (request ReencryptBucketRequest) RetryPolicy() *common.RetryPolicy { + return request.RequestMetadata.RetryPolicy +} + +// ReencryptBucketResponse wrapper for the ReencryptBucket operation +type ReencryptBucketResponse struct { + + // The underlying http response + RawResponse *http.Response + + // Unique Oracle-assigned identifier for the asynchronous request. If you need to contact Oracle about a + // particular request, provide this request ID. + OpcWorkRequestId *string `presentIn:"header" name:"opc-work-request-id"` + + // Echoes back the value passed in the opc-client-request-id header, for use by clients when debugging. + OpcClientRequestId *string `presentIn:"header" name:"opc-client-request-id"` + + // Unique Oracle-assigned identifier for the request. If you need to contact Oracle about a particular + // request, provide this request ID. 
+ OpcRequestId *string `presentIn:"header" name:"opc-request-id"` +} + +func (response ReencryptBucketResponse) String() string { + return common.PointerString(response) +} + +// HTTPResponse implements the OCIResponse interface +func (response ReencryptBucketResponse) HTTPResponse() *http.Response { + return response.RawResponse +} diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_bucket_details.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_bucket_details.go index 5751dff8c0..df01a64ff9 100644 --- a/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_bucket_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_bucket_details.go @@ -20,7 +20,7 @@ type UpdateBucketDetails struct { // The Object Storage namespace in which the bucket lives. Namespace *string `mandatory:"false" json:"namespace"` - // The compartmentId for the compartment to which the bucket is targeted to move to. + // The compartmentId for the compartment to move the bucket to. CompartmentId *string `mandatory:"false" json:"compartmentId"` // The name of the bucket. Avoid entering confidential information. @@ -36,6 +36,10 @@ type UpdateBucketDetails struct { // on the bucket, public access is allowed for the `GetObject` and `HeadObject` operations. PublicAccessType UpdateBucketDetailsPublicAccessTypeEnum `mandatory:"false" json:"publicAccessType,omitempty"` + // A property that determines whether events will be generated for operations on objects in this bucket. + // This is false by default. + ObjectEventsEnabled *bool `mandatory:"false" json:"objectEventsEnabled"` + // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Department": "Finance"}` diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_namespace_metadata_details.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_namespace_metadata_details.go index 01c213baf1..b51c845640 100644 --- a/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_namespace_metadata_details.go +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/update_namespace_metadata_details.go @@ -13,7 +13,7 @@ import ( ) // UpdateNamespaceMetadataDetails UpdateNamespaceMetadataDetails is used to update the NamespaceMetadata. To update NamespaceMetadata, a user -// must have NAMESPACE_UPDATE permission. +// must have OBJECTSTORAGE_NAMESPACE_UPDATE permission. type UpdateNamespaceMetadataDetails struct { // The updated compartment id for use by an S3 client, if this field is set. diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request.go index f1b69c1f84..bd1aab9eb4 100644 --- a/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request.go +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request.go @@ -24,10 +24,11 @@ type WorkRequest struct { // The id of the work request. Id *string `mandatory:"false" json:"id"` - // The OCID of the compartment that contains the work request. Work requests should be scoped to - // the same compartment as the resource the work request affects. If the work request affects multiple resources, - // and those resources are not in the same compartment, it is up to the service team to pick the primary - // resource whose compartment should be used. 
+ // The OCID of the compartment that contains the work request. Work requests are scoped to the same compartment + // as the resource the work request affects. + // If the work request affects multiple resources and those resources are not in the same compartment, the OCID of + // the primary resource is used. For example, you can copy an object in a bucket in one compartment to a bucket in + // another compartment. In this case, the OCID of the source compartment is used. CompartmentId *string `mandatory:"false" json:"compartmentId"` Resources []WorkRequestResource `mandatory:"false" json:"resources"` @@ -57,11 +58,13 @@ type WorkRequestOperationTypeEnum string // Set of constants representing the allowable values for WorkRequestOperationTypeEnum const ( - WorkRequestOperationTypeObject WorkRequestOperationTypeEnum = "COPY_OBJECT" + WorkRequestOperationTypeCopyObject WorkRequestOperationTypeEnum = "COPY_OBJECT" + WorkRequestOperationTypeReencrypt WorkRequestOperationTypeEnum = "REENCRYPT" ) var mappingWorkRequestOperationType = map[string]WorkRequestOperationTypeEnum{ - "COPY_OBJECT": WorkRequestOperationTypeObject, + "COPY_OBJECT": WorkRequestOperationTypeCopyObject, + "REENCRYPT": WorkRequestOperationTypeReencrypt, } // GetWorkRequestOperationTypeEnumValues Enumerates the set of values for WorkRequestOperationTypeEnum diff --git a/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request_summary.go b/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request_summary.go index 8a99ce502c..8834025b22 100644 --- a/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request_summary.go +++ b/vendor/github.com/oracle/oci-go-sdk/objectstorage/work_request_summary.go @@ -24,10 +24,11 @@ type WorkRequestSummary struct { // The id of the work request. Id *string `mandatory:"false" json:"id"` - // The OCID of the compartment that contains the work request. Work requests should be scoped to - // the same compartment as the resource the work request affects. If the work request affects multiple resources, - // and those resources are not in the same compartment, it is up to the service team to pick the primary - // resource whose compartment should be used. + // The OCID of the compartment that contains the work request. Work requests are scoped to the same compartment + // as the resource the work request affects. + // If the work request affects multiple resources and those resources are not in the same compartment, the OCID of + // the primary resource is used. For example, you can copy an object in a bucket in one compartment to a bucket in + // another compartment. In this case, the OCID of the source compartment is used. 
CompartmentId *string `mandatory:"false" json:"compartmentId"` Resources []WorkRequestResource `mandatory:"false" json:"resources"` @@ -57,11 +58,13 @@ type WorkRequestSummaryOperationTypeEnum string // Set of constants representing the allowable values for WorkRequestSummaryOperationTypeEnum const ( - WorkRequestSummaryOperationTypeObject WorkRequestSummaryOperationTypeEnum = "COPY_OBJECT" + WorkRequestSummaryOperationTypeCopyObject WorkRequestSummaryOperationTypeEnum = "COPY_OBJECT" + WorkRequestSummaryOperationTypeReencrypt WorkRequestSummaryOperationTypeEnum = "REENCRYPT" ) var mappingWorkRequestSummaryOperationType = map[string]WorkRequestSummaryOperationTypeEnum{ - "COPY_OBJECT": WorkRequestSummaryOperationTypeObject, + "COPY_OBJECT": WorkRequestSummaryOperationTypeCopyObject, + "REENCRYPT": WorkRequestSummaryOperationTypeReencrypt, } // GetWorkRequestSummaryOperationTypeEnumValues Enumerates the set of values for WorkRequestSummaryOperationTypeEnum diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go deleted file mode 100644 index 65ab1e9966..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tracecontext contains HTTP propagator for TraceContext standard. -// See https://github.com/w3c/distributed-tracing for more information. -package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - -import ( - "encoding/hex" - "fmt" - "net/http" - "net/textproto" - "regexp" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" - "go.opencensus.io/trace/tracestate" -) - -const ( - supportedVersion = 0 - maxVersion = 254 - maxTracestateLen = 512 - traceparentHeader = "traceparent" - tracestateHeader = "tracestate" - trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$` -) - -var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt) - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// HTTPFormat implements the TraceContext trace propagation format. -type HTTPFormat struct{} - -// SpanContextFromRequest extracts a span context from incoming requests. 
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h, ok := getRequestHeader(req, traceparentHeader, false) - if !ok { - return trace.SpanContext{}, false - } - sections := strings.Split(h, "-") - if len(sections) < 4 { - return trace.SpanContext{}, false - } - - if len(sections[0]) != 2 { - return trace.SpanContext{}, false - } - ver, err := hex.DecodeString(sections[0]) - if err != nil { - return trace.SpanContext{}, false - } - version := int(ver[0]) - if version > maxVersion { - return trace.SpanContext{}, false - } - - if version == 0 && len(sections) != 4 { - return trace.SpanContext{}, false - } - - if len(sections[1]) != 32 { - return trace.SpanContext{}, false - } - tid, err := hex.DecodeString(sections[1]) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], tid) - - if len(sections[2]) != 16 { - return trace.SpanContext{}, false - } - sid, err := hex.DecodeString(sections[2]) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.SpanID[:], sid) - - opts, err := hex.DecodeString(sections[3]) - if err != nil || len(opts) < 1 { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(opts[0]) - - // Don't allow all zero trace or span ID. - if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} { - return trace.SpanContext{}, false - } - - sc.Tracestate = tracestateFromRequest(req) - return sc, true -} - -// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2. -// If commaSeparated is true, multiple header fields with the same field name using be -// combined using ",". -// If no header was found using the given name, "ok" would be false. -// If more than one headers was found using the given name, while commaSeparated is false, -// "ok" would be false. -func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) { - v := req.Header[textproto.CanonicalMIMEHeaderKey(name)] - switch len(v) { - case 0: - return "", false - case 1: - return v[0], true - default: - return strings.Join(v, ","), commaSeparated - } -} - -// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. -// Revisit to return additional boolean value to indicate parsing error when following issues -// are resolved. -// https://github.com/w3c/distributed-tracing/issues/172 -// https://github.com/w3c/distributed-tracing/issues/175 -func tracestateFromRequest(req *http.Request) *tracestate.Tracestate { - h, _ := getRequestHeader(req, tracestateHeader, true) - if h == "" { - return nil - } - - var entries []tracestate.Entry - pairs := strings.Split(h, ",") - hdrLenWithoutOWS := len(pairs) - 1 // Number of commas - for _, pair := range pairs { - matches := trimOWSRegExp.FindStringSubmatch(pair) - if matches == nil { - return nil - } - pair = matches[1] - hdrLenWithoutOWS += len(pair) - if hdrLenWithoutOWS > maxTracestateLen { - return nil - } - kv := strings.Split(pair, "=") - if len(kv) != 2 { - return nil - } - entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]}) - } - ts, err := tracestate.New(nil, entries...) 
- if err != nil { - return nil - } - - return ts -} - -func tracestateToRequest(sc trace.SpanContext, req *http.Request) { - var pairs = make([]string, 0, len(sc.Tracestate.Entries())) - if sc.Tracestate != nil { - for _, entry := range sc.Tracestate.Entries() { - pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "=")) - } - h := strings.Join(pairs, ",") - - if h != "" && len(h) <= maxTracestateLen { - req.Header.Set(tracestateHeader, h) - } - } -} - -// SpanContextToRequest modifies the given request to include traceparent and tracestate headers. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - h := fmt.Sprintf("%x-%x-%x-%x", - []byte{supportedVersion}, - sc.TraceID[:], - sc.SpanID[:], - []byte{byte(sc.TraceOptions)}) - req.Header.Set(traceparentHeader, h) - tracestateToRequest(sc, req) -} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 6eb2aa95f5..81de32b360 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -194,9 +194,16 @@ func (cs computeSource) Token() (*oauth2.Token, error) { if res.ExpiresInSec == 0 || res.AccessToken == "" { return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") } - return &oauth2.Token{ + tok := &oauth2.Token{ AccessToken: res.AccessToken, TokenType: res.TokenType, Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil + } + // NOTE(cbro): add hidden metadata about where the token is from. + // This is needed for detection by client libraries to know that credentials come from the metadata server. + // This may be removed in a future version of this library. + return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil } diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go index 99f3e0a32c..b2bf18298b 100644 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -66,6 +66,14 @@ type Config struct { // request. If empty, the value of TokenURL is used as the // intended audience. Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. 
+ UseIDToken bool } // TokenSource returns a JWT TokenSource using the configuration @@ -97,9 +105,10 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } hc := oauth2.NewClient(js.ctx, nil) claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, } if subject := js.conf.Subject; subject != "" { claimSet.Sub = subject @@ -166,5 +175,11 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } token.Expiry = time.Unix(claimSet.Exp, 0) } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } return token, nil } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 428283f0b0..291df5c833 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -117,7 +117,7 @@ var ( // ApprovalForce forces the users to view the consent dialog // and confirm the permissions request at the URL returned // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") + ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") ) // An AuthCodeOption is passed to Config.AuthCodeURL. diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd746..0000000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e968..0000000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 733099041f..0000000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go deleted file mode 100644 index 7f096fef07..0000000000 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semaphore provides a weighted semaphore implementation. -package semaphore // import "golang.org/x/sync/semaphore" - -import ( - "container/list" - "context" - "sync" -) - -type waiter struct { - n int64 - ready chan<- struct{} // Closed when semaphore acquired. -} - -// NewWeighted creates a new weighted semaphore with the given -// maximum combined weight for concurrent access. -func NewWeighted(n int64) *Weighted { - w := &Weighted{size: n} - return w -} - -// Weighted provides a way to bound concurrent access to a resource. -// The callers can request access with a given weight. -type Weighted struct { - size int64 - cur int64 - mu sync.Mutex - waiters list.List -} - -// Acquire acquires the semaphore with a weight of n, blocking until resources -// are available or ctx is done. On success, returns nil. On failure, returns -// ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. -func (s *Weighted) Acquire(ctx context.Context, n int64) error { - s.mu.Lock() - if s.size-s.cur >= n && s.waiters.Len() == 0 { - s.cur += n - s.mu.Unlock() - return nil - } - - if n > s.size { - // Don't make other Acquire calls block on one that's doomed to fail. - s.mu.Unlock() - <-ctx.Done() - return ctx.Err() - } - - ready := make(chan struct{}) - w := waiter{n: n, ready: ready} - elem := s.waiters.PushBack(w) - s.mu.Unlock() - - select { - case <-ctx.Done(): - err := ctx.Err() - s.mu.Lock() - select { - case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil - default: - s.waiters.Remove(elem) - } - s.mu.Unlock() - return err - - case <-ready: - return nil - } -} - -// TryAcquire acquires the semaphore with a weight of n without blocking. -// On success, returns true. On failure, returns false and leaves the semaphore unchanged. -func (s *Weighted) TryAcquire(n int64) bool { - s.mu.Lock() - success := s.size-s.cur >= n && s.waiters.Len() == 0 - if success { - s.cur += n - } - s.mu.Unlock() - return success -} - -// Release releases the semaphore with a weight of n. -func (s *Weighted) Release(n int64) { - s.mu.Lock() - s.cur -= n - if s.cur < 0 { - s.mu.Unlock() - panic("semaphore: released more than held") - } - for { - next := s.waiters.Front() - if next == nil { - break // No more waiters blocked. - } - - w := next.Value.(waiter) - if s.size-s.cur < w.n { - // Not enough tokens for the next waiter. 
We could keep going (to try to - // find a waiter with a smaller request), but under load that could cause - // starvation for large requests; instead, we leave all remaining waiters - // blocked. - // - // Consider a semaphore used as a read-write lock, with N tokens, N - // readers, and one writer. Each reader can Acquire(1) to obtain a read - // lock. The writer can Acquire(N) to obtain a write lock, excluding all - // of the readers. If we allow the readers to jump ahead in the queue, - // the writer will starve — there is always one token available for every - // reader. - break - } - - s.cur += w.n - s.waiters.Remove(next) - close(w.ready) - } - s.mu.Unlock() -} diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS index f73b725745..f07029059d 100644 --- a/vendor/google.golang.org/api/AUTHORS +++ b/vendor/google.golang.org/api/AUTHORS @@ -8,3 +8,4 @@ # Please keep the list sorted. Google Inc. +LightStep Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS index fe55ebff07..788677b8f0 100644 --- a/vendor/google.golang.org/api/CONTRIBUTORS +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -45,6 +45,7 @@ Jason Hall Johan Euphrosine Kostik Shtoyk Kunpei Sakai +Matthew Dolan Matthew Whisenhunt Michael McGreevy Nick Craig-Wood diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index e3aeff8104..9057dfe739 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -759,7 +759,7 @@ ] }, "create": { - "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. It is automatically deleted\nafter a few hours, so there is no need to call DeleteOperation.\n\nOur SLO permits Project creation to take up to 30 seconds at the 90th\npercentile. As of 2016-08-29, we are observing 6 seconds 50th percentile\nlatency. 95th percentile latency is around 11 seconds. We recommend\npolling at the 5th second with an exponential backoff.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.create` on the specified parent for the new\nproject. The parent is identified by a specified ResourceId,\nwhich must include both an ID and a type, such as organization.\n\nThis method does not associate the new project with a billing account.\nYou can set or update the billing account associated with a project using\nthe [`projects.updateBillingInfo`]\n(/billing/reference/rest/v1/projects/updateBillingInfo) method.", + "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. This process usually takes a few\nseconds, but can sometimes take much longer. The tracking Operation is\nautomatically deleted after a few hours, so there is no need to call\nDeleteOperation.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.create` on the specified parent for the new\nproject. 
The parent is identified by a specified ResourceId,\nwhich must include both an ID and a type, such as organization.\n\nThis method does not associate the new project with a billing account.\nYou can set or update the billing account associated with a project using\nthe [`projects.updateBillingInfo`]\n(/billing/reference/rest/v1/projects/updateBillingInfo) method.", "flatPath": "v1/projects", "httpMethod": "POST", "id": "cloudresourcemanager.projects.create", @@ -1031,7 +1031,7 @@ ] }, "setIamPolicy": { - "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted only to `user` and `serviceAccount`.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", + "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. 
The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", "flatPath": "v1/projects/{resource}:setIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.setIamPolicy", @@ -1170,7 +1170,7 @@ } } }, - "revision": "20190424", + "revision": "20191018", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1185,7 +1185,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"fooservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:bar@gmail.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. 
It also exempts foo@gmail.com from DATA_READ logging, and\nbar@gmail.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1203,7 +1203,7 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { @@ -1241,7 +1241,7 @@ "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. 
For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", "items": { "type": "string" }, @@ -1464,7 +1464,12 @@ "GetIamPolicyRequest": { "description": "Request message for `GetIamPolicy` method.", "id": "GetIamPolicyRequest", - "properties": {}, + "properties": { + "options": { + "$ref": "GetPolicyOptions", + "description": "OPTIONAL: A `GetPolicyOptions` object for specifying options to\n`GetIamPolicy`. This field is only used by Cloud IAM." + } + }, "type": "object" }, "GetOrgPolicyRequest": { @@ -1478,6 +1483,18 @@ }, "type": "object" }, + "GetPolicyOptions": { + "description": "Encapsulates settings provided to GetIamPolicy.", + "id": "GetPolicyOptions", + "properties": { + "requestedPolicyVersion": { + "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Lien": { "description": "A Lien represents an encumbrance on the actions that can be performed on a\nresource.", "id": "Lien", @@ -1615,7 +1632,7 @@ "type": "object" }, "ListPolicy": { - "description": "Used in `policy_type` to specify how `list_policy` behaves at this\nresource.\n\n`ListPolicy` can define specific values and subtrees of Cloud Resource\nManager resource hierarchy (`Organizations`, `Folders`, `Projects`) that\nare allowed or denied by setting the `allowed_values` and `denied_values`\nfields. This is achieved by using the `under:` and optional `is:` prefixes.\nThe `under:` prefix is used to denote resource subtree values.\nThe `is:` prefix is used to denote specific values, and is required only\nif the value contains a \":\". Values prefixed with \"is:\" are treated the\nsame as values with no prefix.\nAncestry subtrees must be in one of the following formats:\n - “projects/\u003cproject-id\u003e”, e.g. “projects/tokyo-rain-123”\n - “folders/\u003cfolder-id\u003e”, e.g. “folders/1234”\n - “organizations/\u003corganization-id\u003e”, e.g. “organizations/1234”\nThe `supports_under` field of the associated `Constraint` defines whether\nancestry prefixes can be used. You can set `allowed_values` and\n`denied_values` in the same `Policy` if `all_values` is\n`ALL_VALUES_UNSPECIFIED`. `ALLOW` or `DENY` are used to allow or deny all\nvalues. 
If `all_values` is set to either `ALLOW` or `DENY`,\n`allowed_values` and `denied_values` must be unset.", + "description": "Used in `policy_type` to specify how `list_policy` behaves at this\nresource.\n\n`ListPolicy` can define specific values and subtrees of Cloud Resource\nManager resource hierarchy (`Organizations`, `Folders`, `Projects`) that\nare allowed or denied by setting the `allowed_values` and `denied_values`\nfields. This is achieved by using the `under:` and optional `is:` prefixes.\nThe `under:` prefix is used to denote resource subtree values.\nThe `is:` prefix is used to denote specific values, and is required only\nif the value contains a \":\". Values prefixed with \"is:\" are treated the\nsame as values with no prefix.\nAncestry subtrees must be in one of the following formats:\n - \"projects/\u003cproject-id\u003e\", e.g. \"projects/tokyo-rain-123\"\n - \"folders/\u003cfolder-id\u003e\", e.g. \"folders/1234\"\n - \"organizations/\u003corganization-id\u003e\", e.g. \"organizations/1234\"\nThe `supports_under` field of the associated `Constraint` defines whether\nancestry prefixes can be used. You can set `allowed_values` and\n`denied_values` in the same `Policy` if `all_values` is\n`ALL_VALUES_UNSPECIFIED`. `ALLOW` or `DENY` are used to allow or deny all\nvalues. If `all_values` is set to either `ALLOW` or `DENY`,\n`allowed_values` and `denied_values` must be unset.", "id": "ListPolicy", "properties": { "allValues": { @@ -1647,7 +1664,7 @@ "type": "array" }, "inheritFromParent": { - "description": "Determines the inheritance behavior for this `Policy`.\n\nBy default, a `ListPolicy` set at a resource supercedes any `Policy` set\nanywhere up the resource hierarchy. However, if `inherit_from_parent` is\nset to `true`, then the values from the effective `Policy` of the parent\nresource are inherited, meaning the values set in this `Policy` are\nadded to the values inherited up the hierarchy.\n\nSetting `Policy` hierarchies that inherit both allowed values and denied\nvalues isn't recommended in most circumstances to keep the configuration\nsimple and understandable. However, it is possible to set a `Policy` with\n`allowed_values` set that inherits a `Policy` with `denied_values` set.\nIn this case, the values that are allowed must be in `allowed_values` and\nnot present in `denied_values`.\n\nFor example, suppose you have a `Constraint`\n`constraints/serviceuser.services`, which has a `constraint_type` of\n`list_constraint`, and with `constraint_default` set to `ALLOW`.\nSuppose that at the Organization level, a `Policy` is applied that\nrestricts the allowed API activations to {`E1`, `E2`}. 
Then, if a\n`Policy` is applied to a project below the Organization that has\n`inherit_from_parent` set to `false` and field all_values set to DENY,\nthen an attempt to activate any API will be denied.\n\nThe following examples demonstrate different possible layerings for\n`projects/bar` parented by `organizations/foo`:\n\nExample 1 (no inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: “E1” allowed_values:”E2”}\n `projects/bar` has `inherit_from_parent` `false` and values:\n {allowed_values: \"E3\" allowed_values: \"E4\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E3`, and `E4`.\n\nExample 2 (inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: “E1” allowed_values:”E2”}\n `projects/bar` has a `Policy` with values:\n {value: “E3” value: ”E4” inherit_from_parent: true}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`.\n\nExample 3 (inheriting both allowed and denied values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {denied_values: \"E1\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe value accepted at `projects/bar` is `E2`.\n\nExample 4 (RestoreDefault):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: “E1” allowed_values:”E2”}\n `projects/bar` has a `Policy` with values:\n {RestoreDefault: {}}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 5 (no policy inherits parent policy):\n `organizations/foo` has no `Policy` set.\n `projects/bar` has no `Policy` set.\nThe accepted values at both levels are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 6 (ListConstraint allowing all):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: “E1” allowed_values: ”E2”}\n `projects/bar` has a `Policy` with:\n {all: ALLOW}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nAny value is accepted at `projects/bar`.\n\nExample 7 (ListConstraint allowing none):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: “E1” allowed_values: ”E2”}\n `projects/bar` has a `Policy` with:\n {all: DENY}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nNo value is accepted at `projects/bar`.\n\nExample 10 (allowed and denied subtrees of Resource Manager hierarchy):\nGiven the following resource hierarchy\n O1-\u003e{F1, F2}; F1-\u003e{P1}; F2-\u003e{P2, P3},\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"under:organizations/O1\"}\n `projects/bar` has a `Policy` with:\n {allowed_values: \"under:projects/P3\"}\n {denied_values: \"under:folders/F2\"}\nThe accepted values at `organizations/foo` are `organizations/O1`,\n `folders/F1`, `folders/F2`, `projects/P1`, `projects/P2`,\n `projects/P3`.\nThe accepted values at `projects/bar` are `organizations/O1`,\n `folders/F1`, `projects/P1`.", + "description": "Determines the inheritance behavior for this `Policy`.\n\nBy default, a `ListPolicy` set at a resource supercedes any `Policy` set\nanywhere up the resource hierarchy. 
However, if `inherit_from_parent` is\nset to `true`, then the values from the effective `Policy` of the parent\nresource are inherited, meaning the values set in this `Policy` are\nadded to the values inherited up the hierarchy.\n\nSetting `Policy` hierarchies that inherit both allowed values and denied\nvalues isn't recommended in most circumstances to keep the configuration\nsimple and understandable. However, it is possible to set a `Policy` with\n`allowed_values` set that inherits a `Policy` with `denied_values` set.\nIn this case, the values that are allowed must be in `allowed_values` and\nnot present in `denied_values`.\n\nFor example, suppose you have a `Constraint`\n`constraints/serviceuser.services`, which has a `constraint_type` of\n`list_constraint`, and with `constraint_default` set to `ALLOW`.\nSuppose that at the Organization level, a `Policy` is applied that\nrestricts the allowed API activations to {`E1`, `E2`}. Then, if a\n`Policy` is applied to a project below the Organization that has\n`inherit_from_parent` set to `false` and field all_values set to DENY,\nthen an attempt to activate any API will be denied.\n\nThe following examples demonstrate different possible layerings for\n`projects/bar` parented by `organizations/foo`:\n\nExample 1 (no inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has `inherit_from_parent` `false` and values:\n {allowed_values: \"E3\" allowed_values: \"E4\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E3`, and `E4`.\n\nExample 2 (inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {value: \"E3\" value: \"E4\" inherit_from_parent: true}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`.\n\nExample 3 (inheriting both allowed and denied values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {denied_values: \"E1\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe value accepted at `projects/bar` is `E2`.\n\nExample 4 (RestoreDefault):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {RestoreDefault: {}}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 5 (no policy inherits parent policy):\n `organizations/foo` has no `Policy` set.\n `projects/bar` has no `Policy` set.\nThe accepted values at both levels are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 6 (ListConstraint allowing all):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: ALLOW}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nAny value is accepted at `projects/bar`.\n\nExample 7 (ListConstraint allowing none):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: DENY}\nThe accepted values at `organizations/foo` are `E1`, 
E2`.\nNo value is accepted at `projects/bar`.\n\nExample 10 (allowed and denied subtrees of Resource Manager hierarchy):\nGiven the following resource hierarchy\n O1-\u003e{F1, F2}; F1-\u003e{P1}; F2-\u003e{P2, P3},\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"under:organizations/O1\"}\n `projects/bar` has a `Policy` with:\n {allowed_values: \"under:projects/P3\"}\n {denied_values: \"under:folders/F2\"}\nThe accepted values at `organizations/foo` are `organizations/O1`,\n `folders/F1`, `folders/F2`, `projects/P1`, `projects/P2`,\n `projects/P3`.\nThe accepted values at `projects/bar` are `organizations/O1`,\n `folders/F1`, `projects/P1`.", "type": "boolean" }, "suggestedValue": { @@ -1696,7 +1713,7 @@ "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -1753,16 +1770,16 @@ "id": "Organization", "properties": { "creationTime": { - "description": "Timestamp when the Organization was created. Assigned by the server.\n@OutputOnly", + "description": "Timestamp when the Organization was created. Assigned by the server.", "format": "google-datetime", "type": "string" }, "displayName": { - "description": "A human-readable string that refers to the Organization in the\nGCP Console UI. This string is set by the server and cannot be\nchanged. The string will be set to the primary domain (for example,\n\"google.com\") of the G Suite customer that owns the organization.\n@OutputOnly", + "description": "A human-readable string that refers to the Organization in the\nGCP Console UI. This string is set by the server and cannot be\nchanged. The string will be set to the primary domain (for example,\n\"google.com\") of the G Suite customer that owns the organization.", "type": "string" }, "lifecycleState": { - "description": "The organization's current lifecycle state. Assigned by the server.\n@OutputOnly", + "description": "The organization's current lifecycle state. Assigned by the server.", "enum": [ "LIFECYCLE_STATE_UNSPECIFIED", "ACTIVE", @@ -1776,7 +1793,7 @@ "type": "string" }, "name": { - "description": "Output Only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", + "description": "Output only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", "type": "string" }, "owner": { @@ -1798,7 +1815,7 @@ "type": "object" }, "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\n**YAML Example**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-other-app@appspot.gserviceaccount.com\n role: roles/owner\n - members:\n - user:sean@example.com\n role: roles/viewer\n\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam/docs).", + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions (defined by IAM or configured by users). A `binding` can\noptionally specify a `condition`, which is a logic expression that further\nconstrains the role binding based on attributes about the request and/or\ntarget resource.\n\n**JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c\n timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ]\n }\n\n**YAML Example**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam/docs).", "id": "Policy", "properties": { "auditConfigs": { @@ -1809,19 +1826,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", + "description": "Associates a list of `members` to a `role`. 
Optionally may specify a\n`condition` that determines when binding is in effect.\n`bindings` with no members will result in an error.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten. Due to blind-set semantics of an etag-less policy,\n'setIamPolicy' will not fail even if either of incoming or stored policy\ndoes not meet the version requirements.", "format": "byte", "type": "string" }, "version": { - "description": "Deprecated.", + "description": "Specifies the format of the policy.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nOperations affecting conditional bindings must specify version 3. This can\nbe either setting a conditional policy, modifying a conditional binding,\nor removing a conditional binding from the stored conditional policy.\nOperations on non-conditional policies may specify any valid value or\nleave the field unset.\n\nIf no etag is provided in the call to `setIamPolicy`, any version\ncompliance checks on the incoming and/or stored policy is skipped.", "format": "int32", "type": "integer" } @@ -1909,7 +1926,7 @@ "type": "string" }, "type": { - "description": "Required field representing the resource type this id is for.\nAt present, the valid types are: \"organization\" and \"folder\".", + "description": "Required field representing the resource type this id is for.\nAt present, the valid types are: \"organization\", \"folder\", and \"project\".", "type": "string" } }, @@ -1926,7 +1943,7 @@ "id": "SearchOrganizationsRequest", "properties": { "filter": { - "description": "An optional query string used to filter the Organizations to return in\nthe response. Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a G Suite domain, for example:\n\n| Filter | Description |\n|-------------------------------------|----------------------------------|\n| owner.directorycustomerid:123456789 | Organizations with `owner.directory_customer_id` equal to `123456789`.|\n| domain:google.com | Organizations corresponding to the domain `google.com`.|\n\nThis field is optional.", + "description": "An optional query string used to filter the Organizations to return in\nthe response. 
Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a G Suite domain, for example:\n\n* Filter `owner.directorycustomerid:123456789` returns Organization\nresources with `owner.directory_customer_id` equal to `123456789`.\n* Filter `domain:google.com` returns Organization resources corresponding\nto the domain `google.com`.\n\nThis field is optional.", "type": "string" }, "pageSize": { @@ -1987,7 +2004,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error\nmessage, and error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). 
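On the REST surface, errors in the simplified Status model described above reach Go callers of this generated client as *googleapi.Error values. A small sketch of inspecting one; the helper name is illustrative, not taken from this repository.

package main

import (
	"errors"
	"log"

	"google.golang.org/api/googleapi"
)

// logAPIError reports the code and message carried by a *googleapi.Error,
// which is how non-2xx responses from the generated client surface the
// error code / message / details triple described in the Status schema.
func logAPIError(err error) {
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		log.Printf("API error %d: %s", gerr.Code, gerr.Message)
		return
	}
	log.Printf("error: %v", err)
}

func main() {
	logAPIError(errors.New("placeholder error"))
}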
Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", "id": "Status", "properties": { "code": { diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 1bf45d06b1..e40e175896 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -53,8 +53,8 @@ import ( "strconv" "strings" - gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" ) @@ -247,7 +247,7 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { // { // "log_type": "DATA_READ", // "exempted_members": [ -// "user:foo@gmail.com" +// "user:jose@example.com" // ] // }, // { @@ -259,7 +259,7 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { // ] // }, // { -// "service": "fooservice.googleapis.com" +// "service": "sampleservice.googleapis.com" // "audit_log_configs": [ // { // "log_type": "DATA_READ", @@ -267,7 +267,7 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { // { // "log_type": "DATA_WRITE", // "exempted_members": [ -// "user:bar@gmail.com" +// "user:aliya@example.com" // ] // } // ] @@ -275,11 +275,11 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { // ] // } // -// For fooservice, this policy enables DATA_READ, DATA_WRITE and +// For sampleservice, this policy enables DATA_READ, DATA_WRITE and // ADMIN_READ -// logging. It also exempts foo@gmail.com from DATA_READ logging, +// logging. It also exempts jose@example.com from DATA_READ logging, // and -// bar@gmail.com from DATA_WRITE logging. +// aliya@example.com from DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. @@ -325,7 +325,7 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { // { // "log_type": "DATA_READ", // "exempted_members": [ -// "user:foo@gmail.com" +// "user:jose@example.com" // ] // }, // { @@ -336,7 +336,7 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while // exempting -// foo@gmail.com from DATA_READ logging. +// jose@example.com from DATA_READ logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging // for this type of @@ -401,7 +401,7 @@ type Binding struct { // // * `user:{emailid}`: An email address that represents a specific // Google - // account. For example, `alice@gmail.com` . + // account. For example, `alice@example.com` . // // // * `serviceAccount:{emailid}`: An email address that represents a @@ -915,6 +915,32 @@ func (s *GetEffectiveOrgPolicyRequest) MarshalJSON() ([]byte, error) { // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { + // Options: OPTIONAL: A `GetPolicyOptions` object for specifying options + // to + // `GetIamPolicy`. This field is only used by Cloud IAM. + Options *GetPolicyOptions `json:"options,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Options") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Options") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod GetIamPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GetOrgPolicyRequest: The request sent to the GetOrgPolicy method. @@ -945,6 +971,47 @@ func (s *GetOrgPolicyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GetPolicyOptions: Encapsulates settings provided to GetIamPolicy. +type GetPolicyOptions struct { + // RequestedPolicyVersion: Optional. The policy format version to be + // returned. + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value + // will be + // rejected. + // + // Requests for policies with any conditional bindings must specify + // version 3. + // Policies without any conditional bindings may specify any valid value + // or + // leave the field unset. + RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "RequestedPolicyVersion") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequestedPolicyVersion") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) { + type NoMethod GetPolicyOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Lien: A Lien represents an encumbrance on the actions that can be // performed on a // resource. @@ -1282,11 +1349,10 @@ func (s *ListOrgPoliciesResponse) MarshalJSON() ([]byte, error) { // the // same as values with no prefix. // Ancestry subtrees must be in one of the following formats: -// - “projects/”, e.g. -// “projects/tokyo-rain-123” -// - “folders/”, e.g. “folders/1234” -// - “organizations/”, e.g. -// “organizations/1234” +// - "projects/", e.g. "projects/tokyo-rain-123" +// - "folders/", e.g. "folders/1234" +// - "organizations/", e.g. 
+// "organizations/1234" // The `supports_under` field of the associated `Constraint` defines // whether // ancestry prefixes can be used. You can set `allowed_values` @@ -1365,7 +1431,7 @@ type ListPolicy struct { // // Example 1 (no inherited values): // `organizations/foo` has a `Policy` with values: - // {allowed_values: “E1” allowed_values:”E2”} + // {allowed_values: "E1" allowed_values:"E2"} // `projects/bar` has `inherit_from_parent` `false` and values: // {allowed_values: "E3" allowed_values: "E4"} // The accepted values at `organizations/foo` are `E1`, `E2`. @@ -1373,9 +1439,9 @@ type ListPolicy struct { // // Example 2 (inherited values): // `organizations/foo` has a `Policy` with values: - // {allowed_values: “E1” allowed_values:”E2”} + // {allowed_values: "E1" allowed_values:"E2"} // `projects/bar` has a `Policy` with values: - // {value: “E3” value: ”E4” inherit_from_parent: true} + // {value: "E3" value: "E4" inherit_from_parent: true} // The accepted values at `organizations/foo` are `E1`, `E2`. // The accepted values at `projects/bar` are `E1`, `E2`, `E3`, and // `E4`. @@ -1390,7 +1456,7 @@ type ListPolicy struct { // // Example 4 (RestoreDefault): // `organizations/foo` has a `Policy` with values: - // {allowed_values: “E1” allowed_values:”E2”} + // {allowed_values: "E1" allowed_values:"E2"} // `projects/bar` has a `Policy` with values: // {RestoreDefault: {}} // The accepted values at `organizations/foo` are `E1`, `E2`. @@ -1409,7 +1475,7 @@ type ListPolicy struct { // // Example 6 (ListConstraint allowing all): // `organizations/foo` has a `Policy` with values: - // {allowed_values: “E1” allowed_values: ”E2”} + // {allowed_values: "E1" allowed_values: "E2"} // `projects/bar` has a `Policy` with: // {all: ALLOW} // The accepted values at `organizations/foo` are `E1`, E2`. @@ -1417,7 +1483,7 @@ type ListPolicy struct { // // Example 7 (ListConstraint allowing none): // `organizations/foo` has a `Policy` with values: - // {allowed_values: “E1” allowed_values: ”E2”} + // {allowed_values: "E1" allowed_values: "E2"} // `projects/bar` has a `Policy` with: // {all: DENY} // The accepted values at `organizations/foo` are `E1`, E2`. @@ -1559,7 +1625,8 @@ type Operation struct { // service that // originally returns it. If you use the default HTTP mapping, // the - // `name` should have the format of `operations/some/unique/name`. + // `name` should be a resource name ending with + // `operations/{unique_id}`. Name string `json:"name,omitempty"` // Response: The normal response of the operation in case of success. @@ -1699,7 +1766,6 @@ func (s *OrgPolicy) MarshalJSON() ([]byte, error) { type Organization struct { // CreationTime: Timestamp when the Organization was created. Assigned // by the server. - // @OutputOnly CreationTime string `json:"creationTime,omitempty"` // DisplayName: A human-readable string that refers to the Organization @@ -1708,14 +1774,11 @@ type Organization struct { // be // changed. The string will be set to the primary domain (for // example, - // "google.com") of the G Suite customer that owns the - // organization. - // @OutputOnly + // "google.com") of the G Suite customer that owns the organization. DisplayName string `json:"displayName,omitempty"` // LifecycleState: The organization's current lifecycle state. Assigned // by the server. - // @OutputOnly // // Possible values: // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only @@ -1725,7 +1788,7 @@ type Organization struct { // by the user. 
LifecycleState string `json:"lifecycleState,omitempty"` - // Name: Output Only. The resource name of the organization. This is + // Name: Output only. The resource name of the organization. This is // the // organization's relative path in the API. Its format // is @@ -1806,31 +1869,43 @@ func (s *OrganizationOwner) MarshalJSON() ([]byte, error) { // specify access control policies for Cloud Platform resources. // // -// A `Policy` consists of a list of `bindings`. A `binding` binds a list -// of -// `members` to a `role`, where the members can be user accounts, Google -// groups, -// Google domains, and service accounts. A `role` is a named list of -// permissions -// defined by IAM. +// A `Policy` is a collection of `bindings`. A `binding` binds one or +// more +// `members` to a single `role`. Members can be user accounts, service +// accounts, +// Google groups, and domains (such as G Suite). A `role` is a named +// list of +// permissions (defined by IAM or configured by users). A `binding` +// can +// optionally specify a `condition`, which is a logic expression that +// further +// constrains the role binding based on attributes about the request +// and/or +// target resource. // // **JSON Example** // // { // "bindings": [ // { -// "role": "roles/owner", +// "role": "roles/resourcemanager.organizationAdmin", // "members": [ // "user:mike@example.com", // "group:admins@example.com", // "domain:google.com", // -// "serviceAccount:my-other-app@appspot.gserviceaccount.com" +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" // ] // }, // { -// "role": "roles/viewer", -// "members": ["user:sean@example.com"] +// "role": "roles/resourcemanager.organizationViewer", +// "members": ["user:eve@example.com"], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", +// } // } // ] // } @@ -1842,12 +1917,16 @@ func (s *OrganizationOwner) MarshalJSON() ([]byte, error) { // - user:mike@example.com // - group:admins@example.com // - domain:google.com -// - serviceAccount:my-other-app@appspot.gserviceaccount.com -// role: roles/owner +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin // - members: -// - user:sean@example.com -// role: roles/viewer -// +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') // // For a description of IAM and its features, see the // [IAM developer's guide](https://cloud.google.com/iam/docs). @@ -1856,7 +1935,9 @@ type Policy struct { // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - // Bindings: Associates a list of `members` to a `role`. + // Bindings: Associates a list of `members` to a `role`. Optionally may + // specify a + // `condition` that determines when binding is in effect. // `bindings` with no members will result in an error. Bindings []*Binding `json:"bindings,omitempty"` @@ -1877,10 +1958,32 @@ type Policy struct { // // If no `etag` is provided in the call to `setIamPolicy`, then the // existing - // policy is overwritten blindly. + // policy is overwritten. 
Due to blind-set semantics of an etag-less + // policy, + // 'setIamPolicy' will not fail even if either of incoming or stored + // policy + // does not meet the version requirements. Etag string `json:"etag,omitempty"` - // Version: Deprecated. + // Version: Specifies the format of the policy. + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value + // will be + // rejected. + // + // Operations affecting conditional bindings must specify version 3. + // This can + // be either setting a conditional policy, modifying a conditional + // binding, + // or removing a conditional binding from the stored conditional + // policy. + // Operations on non-conditional policies may specify any valid value + // or + // leave the field unset. + // + // If no etag is provided in the call to `setIamPolicy`, any + // version + // compliance checks on the incoming and/or stored policy is skipped. Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2087,7 +2190,8 @@ type ResourceId struct { // Type: Required field representing the resource type this id is // for. - // At present, the valid types are: "organization" and "folder". + // At present, the valid types are: "organization", "folder", and + // "project". Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Id") to @@ -2149,15 +2253,12 @@ type SearchOrganizationsRequest struct { // by // `domain`, where the domain is a G Suite domain, for example: // - // | Filter | Description - // - // | - // |-------------------------------------|----------------------------- - // -----| - // | owner.directorycustomerid:123456789 | Organizations with - // `owner.directory_customer_id` equal to `123456789`.| - // | domain:google.com | Organizations corresponding - // to the domain `google.com`.| + // * Filter `owner.directorycustomerid:123456789` returns + // Organization + // resources with `owner.directory_customer_id` equal to `123456789`. + // * Filter `domain:google.com` returns Organization resources + // corresponding + // to the domain `google.com`. // // This field is optional. Filter string `json:"filter,omitempty"` @@ -2317,81 +2418,14 @@ func (s *SetOrgPolicyRequest) MarshalJSON() ([]byte, error) { // suitable for // different programming environments, including REST APIs and RPC APIs. // It is -// used by [gRPC](https://github.com/grpc). The error model is designed -// to be: +// used by [gRPC](https://github.com/grpc). Each `Status` message +// contains +// three pieces of data: error code, error message, and error +// details. // -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, -// error -// message, and error details. The error code should be an enum value -// of -// google.rpc.Code, but it may accept additional error codes if needed. -// The -// error message should be a developer-facing English message that -// helps -// developers *understand* and *resolve* the error. If a localized -// user-facing -// error message is needed, put the localized message in the error -// details or -// localize it in the client. The optional error details may contain -// arbitrary -// information about the error. There is a predefined set of error -// detail types -// in the package `google.rpc` that can be used for common error -// conditions. 
-// -// # Language mapping -// -// The `Status` message is the logical representation of the error -// model, but it -// is not necessarily the actual wire format. When the `Status` message -// is -// exposed in different client libraries and different wire protocols, -// it can be -// mapped differently. For example, it will likely be mapped to some -// exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety -// of -// environments, either with or without APIs, to provide a -// consistent developer experience across different -// environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the -// client, -// it may embed the `Status` in the normal response to indicate the -// partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step -// may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch -// response, the -// `Status` message should be used directly inside batch response, -// one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous -// operation -// results in its response, the status of those operations should -// be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message -// `Status` could -// be used directly after any stripping needed for security/privacy -// reasons. +// You can find out more about this error model and how to work with it +// in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). type Status struct { // Code: The status code, which should be an enum value of // google.rpc.Code. 
@@ -2554,6 +2588,7 @@ func (c *FoldersClearOrgPolicyCall) Header() http.Header { func (c *FoldersClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2701,6 +2736,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Header() http.Header { func (c *FoldersGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2849,6 +2885,7 @@ func (c *FoldersGetOrgPolicyCall) Header() http.Header { func (c *FoldersGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2990,6 +3027,7 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *FoldersListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3154,6 +3192,7 @@ func (c *FoldersListOrgPoliciesCall) Header() http.Header { func (c *FoldersListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3321,6 +3360,7 @@ func (c *FoldersSetOrgPolicyCall) Header() http.Header { func (c *FoldersSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3468,6 +3508,7 @@ func (c *LiensCreateCall) Header() http.Header { func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3599,6 +3640,7 @@ func (c *LiensDeleteCall) Header() http.Header { func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3748,6 +3790,7 @@ func (c *LiensGetCall) Header() http.Header { func (c *LiensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3919,6 +3962,7 @@ func (c *LiensListCall) Header() http.Header { func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4092,6 +4136,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4227,6 +4272,7 @@ func (c *OrganizationsClearOrgPolicyCall) Header() http.Header { func (c *OrganizationsClearOrgPolicyCall) doRequest(alt 
string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4376,6 +4422,7 @@ func (c *OrganizationsGetCall) Header() http.Header { func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4519,6 +4566,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4669,6 +4717,7 @@ func (c *OrganizationsGetIamPolicyCall) Header() http.Header { func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4817,6 +4866,7 @@ func (c *OrganizationsGetOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4958,6 +5008,7 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Header() http.Heade func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5122,6 +5173,7 @@ func (c *OrganizationsListOrgPoliciesCall) Header() http.Header { func (c *OrganizationsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5291,6 +5343,7 @@ func (c *OrganizationsSearchCall) Header() http.Header { func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5448,6 +5501,7 @@ func (c *OrganizationsSetIamPolicyCall) Header() http.Header { func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5593,6 +5647,7 @@ func (c *OrganizationsSetOrgPolicyCall) Header() http.Header { func (c *OrganizationsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5737,6 +5792,7 @@ func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5877,6 +5933,7 @@ func (c *ProjectsClearOrgPolicyCall) Header() http.Header { func (c 
*ProjectsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5982,17 +6039,13 @@ type ProjectsCreateCall struct { // Create: Request that a new Project be created. The result is an // Operation which -// can be used to track the creation process. It is automatically -// deleted -// after a few hours, so there is no need to call DeleteOperation. -// -// Our SLO permits Project creation to take up to 30 seconds at the -// 90th -// percentile. As of 2016-08-29, we are observing 6 seconds 50th -// percentile -// latency. 95th percentile latency is around 11 seconds. We -// recommend -// polling at the 5th second with an exponential backoff. +// can be used to track the creation process. This process usually takes +// a few +// seconds, but can sometimes take much longer. The tracking Operation +// is +// automatically deleted after a few hours, so there is no need to +// call +// DeleteOperation. // // Authorization requires the Google IAM // permission @@ -6042,6 +6095,7 @@ func (c *ProjectsCreateCall) Header() http.Header { func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6102,7 +6156,7 @@ func (c *ProjectsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. It is automatically deleted\nafter a few hours, so there is no need to call DeleteOperation.\n\nOur SLO permits Project creation to take up to 30 seconds at the 90th\npercentile. As of 2016-08-29, we are observing 6 seconds 50th percentile\nlatency. 95th percentile latency is around 11 seconds. We recommend\npolling at the 5th second with an exponential backoff.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.create` on the specified parent for the new\nproject. The parent is identified by a specified ResourceId,\nwhich must include both an ID and a type, such as organization.\n\nThis method does not associate the new project with a billing account.\nYou can set or update the billing account associated with a project using\nthe [`projects.updateBillingInfo`]\n(/billing/reference/rest/v1/projects/updateBillingInfo) method.", + // "description": "Request that a new Project be created. The result is an Operation which\ncan be used to track the creation process. This process usually takes a few\nseconds, but can sometimes take much longer. The tracking Operation is\nautomatically deleted after a few hours, so there is no need to call\nDeleteOperation.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.create` on the specified parent for the new\nproject. 
The parent is identified by a specified ResourceId,\nwhich must include both an ID and a type, such as organization.\n\nThis method does not associate the new project with a billing account.\nYou can set or update the billing account associated with a project using\nthe [`projects.updateBillingInfo`]\n(/billing/reference/rest/v1/projects/updateBillingInfo) method.", // "flatPath": "v1/projects", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.create", @@ -6188,6 +6242,7 @@ func (c *ProjectsDeleteCall) Header() http.Header { func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6330,6 +6385,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6469,6 +6525,7 @@ func (c *ProjectsGetAncestryCall) Header() http.Header { func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6616,6 +6673,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *ProjectsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6767,6 +6825,7 @@ func (c *ProjectsGetIamPolicyCall) Header() http.Header { func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6914,6 +6973,7 @@ func (c *ProjectsGetOrgPolicyCall) Header() http.Header { func (c *ProjectsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7155,6 +7215,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7316,6 +7377,7 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *ProjectsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7480,6 +7542,7 @@ func (c *ProjectsListOrgPoliciesCall) Header() http.Header { func (c *ProjectsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7616,8 +7679,15 @@ type ProjectsSetIamPolicyCall struct { // as // `members` in a `Binding` of a `Policy`. // -// + The owner role can be granted only to `user` and -// `serviceAccount`. 
+// + The owner role can be granted to a `user`, `serviceAccount`, or a +// group +// that is part of an organization. For +// example, +// group@myownpersonaldomain.com could be added as an owner to a project +// in +// the myownpersonaldomain.com organization, but not the +// examplepetstore.com +// organization. // // + Service accounts can be made owners of a project directly // without any restrictions. However, to be added as an owner, a user @@ -7707,6 +7777,7 @@ func (c *ProjectsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7770,7 +7841,7 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted only to `user` and `serviceAccount`.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", + // "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. 
For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", // "flatPath": "v1/projects/{resource}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.setIamPolicy", @@ -7851,6 +7922,7 @@ func (c *ProjectsSetOrgPolicyCall) Header() http.Header { func (c *ProjectsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7993,6 +8065,7 @@ func (c *ProjectsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8140,6 +8213,7 @@ func (c *ProjectsUndeleteCall) Header() http.Header { func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8282,6 +8356,7 @@ func (c *ProjectsUpdateCall) Header() http.Header { func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 9de328d090..a166b61865 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ 
b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -24,12 +24,12 @@ } }, "basePath": "/compute/v1/projects/", - "baseUrl": "https://www.googleapis.com/compute/v1/projects/", + "baseUrl": "https://compute.googleapis.com/compute/v1/projects/", "batchPath": "batch/compute/v1", "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/L7OHwc6Gh2BR8n3__eQkZu5iQsE\"", + "etag": "\"F5McR9eEaw0XRpaO3M9gbIugkbs/SUAabl0tEHVm4xtF3n0zpfUm3IU\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -89,7 +89,7 @@ "acceleratorTypes": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of accelerator types.", + "description": "Retrieves an aggregated list of accelerator types. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.acceleratorTypes.aggregatedList", "parameterOrder": [ @@ -138,7 +138,7 @@ ] }, "get": { - "description": "Returns the specified accelerator type.", + "description": "Returns the specified accelerator type. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.acceleratorTypes.get", "parameterOrder": [ @@ -180,7 +180,7 @@ ] }, "list": { - "description": "Retrieves a list of accelerator types available to the specified project.", + "description": "Retrieves a list of accelerator types available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.acceleratorTypes.list", "parameterOrder": [ @@ -241,7 +241,7 @@ "addresses": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of addresses.", + "description": "Retrieves an aggregated list of addresses. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.addresses.aggregatedList", "parameterOrder": [ @@ -290,7 +290,7 @@ ] }, "delete": { - "description": "Deletes the specified address resource.", + "description": "Deletes the specified address resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.addresses.delete", "parameterOrder": [ @@ -336,7 +336,7 @@ ] }, "get": { - "description": "Returns the specified address resource.", + "description": "Returns the specified address resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.addresses.get", "parameterOrder": [ @@ -378,7 +378,7 @@ ] }, "insert": { - "description": "Creates an address resource in the specified project using the data included in the request.", + "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.addresses.insert", "parameterOrder": [ @@ -419,7 +419,7 @@ ] }, "list": { - "description": "Retrieves a list of addresses contained within the specified region.", + "description": "Retrieves a list of addresses contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.addresses.list", "parameterOrder": [ @@ -480,7 +480,7 @@ "autoscalers": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of autoscalers.", + "description": "Retrieves an aggregated list of autoscalers. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.autoscalers.aggregatedList", "parameterOrder": [ @@ -529,7 +529,7 @@ ] }, "delete": { - "description": "Deletes the specified autoscaler.", + "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.autoscalers.delete", "parameterOrder": [ @@ -575,7 +575,7 @@ ] }, "get": { - "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.autoscalers.get", "parameterOrder": [ @@ -617,7 +617,7 @@ ] }, "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request.", + "description": "Creates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.autoscalers.insert", "parameterOrder": [ @@ -658,7 +658,7 @@ ] }, "list": { - "description": "Retrieves a list of autoscalers contained within the specified zone.", + "description": "Retrieves a list of autoscalers contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.autoscalers.list", "parameterOrder": [ @@ -715,7 +715,7 @@ ] }, "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.autoscalers.patch", "parameterOrder": [ @@ -762,7 +762,7 @@ ] }, "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request.", + "description": "Updates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.autoscalers.update", "parameterOrder": [ @@ -813,7 +813,7 @@ "backendBuckets": { "methods": { "addSignedUrlKey": { - "description": "Adds a key for validating requests with signed URLs for this backend bucket.", + "description": "Adds a key for validating requests with signed URLs for this backend bucket. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendBuckets.addSignedUrlKey", "parameterOrder": [ @@ -853,7 +853,7 @@ ] }, "delete": { - "description": "Deletes the specified BackendBucket resource.", + "description": "Deletes the specified BackendBucket resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.backendBuckets.delete", "parameterOrder": [ @@ -891,7 +891,7 @@ ] }, "deleteSignedUrlKey": { - "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + "description": "Deletes a key for validating requests with signed URLs for this backend bucket. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendBuckets.deleteSignedUrlKey", "parameterOrder": [ @@ -935,7 +935,7 @@ ] }, "get": { - "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.backendBuckets.get", "parameterOrder": [ @@ -969,7 +969,7 @@ ] }, "insert": { - "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + "description": "Creates a BackendBucket resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendBuckets.insert", "parameterOrder": [ @@ -1002,7 +1002,7 @@ ] }, "list": { - "description": "Retrieves the list of BackendBucket resources available to the specified project.", + "description": "Retrieves the list of BackendBucket resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.backendBuckets.list", "parameterOrder": [ @@ -1051,7 +1051,7 @@ ] }, "patch": { - "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.backendBuckets.patch", "parameterOrder": [ @@ -1092,7 +1092,7 @@ ] }, "update": { - "description": "Updates the specified BackendBucket resource with the data included in the request.", + "description": "Updates the specified BackendBucket resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.backendBuckets.update", "parameterOrder": [ @@ -1137,7 +1137,7 @@ "backendServices": { "methods": { "addSignedUrlKey": { - "description": "Adds a key for validating requests with signed URLs for this backend service.", + "description": "Adds a key for validating requests with signed URLs for this backend service. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendServices.addSignedUrlKey", "parameterOrder": [ @@ -1177,7 +1177,7 @@ ] }, "aggregatedList": { - "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.backendServices.aggregatedList", "parameterOrder": [ @@ -1226,7 +1226,7 @@ ] }, "delete": { - "description": "Deletes the specified BackendService resource.", + "description": "Deletes the specified BackendService resource. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.backendServices.delete", "parameterOrder": [ @@ -1264,7 +1264,7 @@ ] }, "deleteSignedUrlKey": { - "description": "Deletes a key for validating requests with signed URLs for this backend service.", + "description": "Deletes a key for validating requests with signed URLs for this backend service. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendServices.deleteSignedUrlKey", "parameterOrder": [ @@ -1308,7 +1308,7 @@ ] }, "get": { - "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + "description": "Returns the specified BackendService resource. Gets a list of available backend services. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.backendServices.get", "parameterOrder": [ @@ -1342,7 +1342,7 @@ ] }, "getHealth": { - "description": "Gets the most recent health check results for this BackendService.", + "description": "Gets the most recent health check results for this BackendService. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendServices.getHealth", "parameterOrder": [ @@ -1378,7 +1378,7 @@ ] }, "insert": { - "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendServices.insert", "parameterOrder": [ @@ -1411,7 +1411,7 @@ ] }, "list": { - "description": "Retrieves the list of BackendService resources available to the specified project.", + "description": "Retrieves the list of BackendService resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.backendServices.list", "parameterOrder": [ @@ -1460,7 +1460,7 @@ ] }, "patch": { - "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.backendServices.patch", "parameterOrder": [ @@ -1501,7 +1501,7 @@ ] }, "setSecurityPolicy": { - "description": "Sets the security policy for the specified backend service.", + "description": "Sets the security policy for the specified backend service. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.backendServices.setSecurityPolicy", "parameterOrder": [ @@ -1541,7 +1541,7 @@ ] }, "update": { - "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.backendServices.update", "parameterOrder": [ @@ -1586,7 +1586,7 @@ "diskTypes": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of disk types.", + "description": "Retrieves an aggregated list of disk types. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.diskTypes.aggregatedList", "parameterOrder": [ @@ -1635,7 +1635,7 @@ ] }, "get": { - "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", + "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.diskTypes.get", "parameterOrder": [ @@ -1677,7 +1677,7 @@ ] }, "list": { - "description": "Retrieves a list of disk types available to the specified project.", + "description": "Retrieves a list of disk types available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.diskTypes.list", "parameterOrder": [ @@ -1737,8 +1737,57 @@ }, "disks": { "methods": { + "addResourcePolicies": { + "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.disks.addResourcePolicies", + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + "request": { + "$ref": "DisksAddResourcePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "aggregatedList": { - "description": "Retrieves an aggregated list of persistent disks.", + "description": "Retrieves an aggregated list of persistent disks. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.disks.aggregatedList", "parameterOrder": [ @@ -1787,7 +1836,7 @@ ] }, "createSnapshot": { - "description": "Creates a snapshot of a specified persistent disk.", + "description": "Creates a snapshot of a specified persistent disk. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.disks.createSnapshot", "parameterOrder": [ @@ -1804,6 +1853,7 @@ "type": "string" }, "guestFlush": { + "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -1840,7 +1890,7 @@ ] }, "delete": { - "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.disks.delete", "parameterOrder": [ @@ -1885,7 +1935,7 @@ ] }, "get": { - "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.disks.get", "parameterOrder": [ @@ -1927,7 +1977,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.disks.getIamPolicy", "parameterOrder": [ @@ -1969,7 +2019,7 @@ ] }, "insert": { - "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", + "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.disks.insert", "parameterOrder": [ @@ -2015,7 +2065,7 @@ ] }, "list": { - "description": "Retrieves a list of persistent disks contained within the specified zone.", + "description": "Retrieves a list of persistent disks contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.disks.list", "parameterOrder": [ @@ -2071,8 +2121,57 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "removeResourcePolicies": { + "description": "Removes resource policies from a disk. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.disks.removeResourcePolicies", + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", + "request": { + "$ref": "DisksRemoveResourcePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "resize": { - "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + "description": "Resizes the specified persistent disk. You can only increase the size of the disk. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.disks.resize", "parameterOrder": [ @@ -2121,7 +2220,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.disks.setIamPolicy", "parameterOrder": [ @@ -2165,7 +2264,7 @@ ] }, "setLabels": { - "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.disks.setLabels", "parameterOrder": [ @@ -2214,7 +2313,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.disks.testIamPermissions", "parameterOrder": [ @@ -2260,10 +2359,241 @@ } } }, + "externalVpnGateways": { + "methods": { + "delete": { + "description": "Deletes the specified externalVpnGateway. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.externalVpnGateways.delete", + "parameterOrder": [ + "project", + "externalVpnGateway" + ], + "parameters": { + "externalVpnGateway": { + "description": "Name of the externalVpnGateways to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.externalVpnGateways.get", + "parameterOrder": [ + "project", + "externalVpnGateway" + ], + "parameters": { + "externalVpnGateway": { + "description": "Name of the externalVpnGateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + "response": { + "$ref": "ExternalVpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.externalVpnGateways.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/externalVpnGateways", + "request": { + "$ref": "ExternalVpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of ExternalVpnGateway available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.externalVpnGateways.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/externalVpnGateways", + "response": { + "$ref": "ExternalVpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.externalVpnGateways.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/externalVpnGateways/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.externalVpnGateways.testIamPermissions", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "firewalls": { "methods": { "delete": { - "description": "Deletes the specified firewall.", + "description": "Deletes the specified firewall. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.firewalls.delete", "parameterOrder": [ @@ -2301,7 +2631,7 @@ ] }, "get": { - "description": "Returns the specified firewall.", + "description": "Returns the specified firewall. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.firewalls.get", "parameterOrder": [ @@ -2335,7 +2665,7 @@ ] }, "insert": { - "description": "Creates a firewall rule in the specified project using the data included in the request.", + "description": "Creates a firewall rule in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.firewalls.insert", "parameterOrder": [ @@ -2368,7 +2698,7 @@ ] }, "list": { - "description": "Retrieves the list of firewall rules available to the specified project.", + "description": "Retrieves the list of firewall rules available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.firewalls.list", "parameterOrder": [ @@ -2417,7 +2747,7 @@ ] }, "patch": { - "description": "Updates the specified firewall rule with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.firewalls.patch", "parameterOrder": [ @@ -2458,7 +2788,7 @@ ] }, "update": { - "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", + "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. To update individual fields, please use PATCH instead. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.firewalls.update", "parameterOrder": [ @@ -2503,7 +2833,7 @@ "forwardingRules": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of forwarding rules.", + "description": "Retrieves an aggregated list of forwarding rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.forwardingRules.aggregatedList", "parameterOrder": [ @@ -2552,7 +2882,7 @@ ] }, "delete": { - "description": "Deletes the specified ForwardingRule resource.", + "description": "Deletes the specified ForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.forwardingRules.delete", "parameterOrder": [ @@ -2598,7 +2928,7 @@ ] }, "get": { - "description": "Returns the specified ForwardingRule resource.", + "description": "Returns the specified ForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.forwardingRules.get", "parameterOrder": [ @@ -2640,7 +2970,7 @@ ] }, "insert": { - "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.forwardingRules.insert", "parameterOrder": [ @@ -2681,7 +3011,7 @@ ] }, "list": { - "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + "description": "Retrieves a list of ForwardingRule resources available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.forwardingRules.list", "parameterOrder": [ @@ -2738,7 +3068,7 @@ ] }, "setTarget": { - "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.forwardingRules.setTarget", "parameterOrder": [ @@ -2791,7 +3121,7 @@ "globalAddresses": { "methods": { "delete": { - "description": "Deletes the specified address resource.", + "description": "Deletes the specified address resource. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.globalAddresses.delete", "parameterOrder": [ @@ -2829,7 +3159,7 @@ ] }, "get": { - "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", + "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.globalAddresses.get", "parameterOrder": [ @@ -2863,7 +3193,7 @@ ] }, "insert": { - "description": "Creates an address resource in the specified project using the data included in the request.", + "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.globalAddresses.insert", "parameterOrder": [ @@ -2896,7 +3226,7 @@ ] }, "list": { - "description": "Retrieves a list of global addresses.", + "description": "Retrieves a list of global addresses. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.globalAddresses.list", "parameterOrder": [ @@ -2949,7 +3279,7 @@ "globalForwardingRules": { "methods": { "delete": { - "description": "Deletes the specified GlobalForwardingRule resource.", + "description": "Deletes the specified GlobalForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.globalForwardingRules.delete", "parameterOrder": [ @@ -2987,7 +3317,7 @@ ] }, "get": { - "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", + "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.globalForwardingRules.get", "parameterOrder": [ @@ -3021,7 +3351,7 @@ ] }, "insert": { - "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.globalForwardingRules.insert", "parameterOrder": [ @@ -3054,7 +3384,7 @@ ] }, "list": { - "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.globalForwardingRules.list", "parameterOrder": [ @@ -3103,7 +3433,7 @@ ] }, "setTarget": { - "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.globalForwardingRules.setTarget", "parameterOrder": [ @@ -3148,7 +3478,7 @@ "globalOperations": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of all operations.", + "description": "Retrieves an aggregated list of all operations. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.globalOperations.aggregatedList", "parameterOrder": [ @@ -3197,7 +3527,7 @@ ] }, "delete": { - "description": "Deletes the specified Operations resource.", + "description": "Deletes the specified Operations resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.globalOperations.delete", "parameterOrder": [ @@ -3227,7 +3557,7 @@ ] }, "get": { - "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", + "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.globalOperations.get", "parameterOrder": [ @@ -3261,7 +3591,7 @@ ] }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified project.", + "description": "Retrieves a list of Operation resources contained within the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.globalOperations.list", "parameterOrder": [ @@ -3313,8 +3643,57 @@ }, "healthChecks": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.healthChecks.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/healthChecks", + "response": { + "$ref": "HealthChecksAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { - "description": "Deletes the specified HealthCheck resource.", + "description": "Deletes the specified HealthCheck resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.healthChecks.delete", "parameterOrder": [ @@ -3352,7 +3731,7 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.healthChecks.get", "parameterOrder": [ @@ -3386,7 +3765,7 @@ ] }, "insert": { - "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.healthChecks.insert", "parameterOrder": [ @@ -3419,7 +3798,7 @@ ] }, "list": { - "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "description": "Retrieves the list of HealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.healthChecks.list", "parameterOrder": [ @@ -3468,7 +3847,7 @@ ] }, "patch": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.healthChecks.patch", "parameterOrder": [ @@ -3509,7 +3888,7 @@ ] }, "update": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.healthChecks.update", "parameterOrder": [ @@ -3554,7 +3933,7 @@ "httpHealthChecks": { "methods": { "delete": { - "description": "Deletes the specified HttpHealthCheck resource.", + "description": "Deletes the specified HttpHealthCheck resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.httpHealthChecks.delete", "parameterOrder": [ @@ -3592,7 +3971,7 @@ ] }, "get": { - "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.httpHealthChecks.get", "parameterOrder": [ @@ -3626,7 +4005,7 @@ ] }, "insert": { - "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.httpHealthChecks.insert", "parameterOrder": [ @@ -3659,7 +4038,7 @@ ] }, "list": { - "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + "description": "Retrieves the list of HttpHealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.httpHealthChecks.list", "parameterOrder": [ @@ -3708,7 +4087,7 @@ ] }, "patch": { - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.httpHealthChecks.patch", "parameterOrder": [ @@ -3749,7 +4128,7 @@ ] }, "update": { - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.httpHealthChecks.update", "parameterOrder": [ @@ -3794,7 +4173,7 @@ "httpsHealthChecks": { "methods": { "delete": { - "description": "Deletes the specified HttpsHealthCheck resource.", + "description": "Deletes the specified HttpsHealthCheck resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.httpsHealthChecks.delete", "parameterOrder": [ @@ -3832,7 +4211,7 @@ ] }, "get": { - "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.httpsHealthChecks.get", "parameterOrder": [ @@ -3866,7 +4245,7 @@ ] }, "insert": { - "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.httpsHealthChecks.insert", "parameterOrder": [ @@ -3899,7 +4278,7 @@ ] }, "list": { - "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.httpsHealthChecks.list", "parameterOrder": [ @@ -3948,7 +4327,7 @@ ] }, "patch": { - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.httpsHealthChecks.patch", "parameterOrder": [ @@ -3989,7 +4368,7 @@ ] }, "update": { - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.httpsHealthChecks.update", "parameterOrder": [ @@ -4034,7 +4413,7 @@ "images": { "methods": { "delete": { - "description": "Deletes the specified image.", + "description": "Deletes the specified image. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.images.delete", "parameterOrder": [ @@ -4072,7 +4451,7 @@ ] }, "deprecate": { - "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.images.deprecate", "parameterOrder": [ @@ -4113,7 +4492,7 @@ ] }, "get": { - "description": "Returns the specified image. Gets a list of available images by making a list() request.", + "description": "Returns the specified image. Gets a list of available images by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.images.get", "parameterOrder": [ @@ -4147,7 +4526,7 @@ ] }, "getFromFamily": { - "description": "Returns the latest image that is part of an image family and is not deprecated.", + "description": "Returns the latest image that is part of an image family and is not deprecated. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.images.getFromFamily", "parameterOrder": [ @@ -4181,7 +4560,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.images.getIamPolicy", "parameterOrder": [ @@ -4215,7 +4594,7 @@ ] }, "insert": { - "description": "Creates an image in the specified project using the data included in the request.", + "description": "Creates an image in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.images.insert", "parameterOrder": [ @@ -4256,7 +4635,7 @@ ] }, "list": { - "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.images.list", "parameterOrder": [ @@ -4305,7 +4684,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.images.setIamPolicy", "parameterOrder": [ @@ -4341,7 +4720,7 @@ ] }, "setLabels": { - "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", + "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.images.setLabels", "parameterOrder": [ @@ -4377,7 +4756,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.images.testIamPermissions", "parameterOrder": [ @@ -4418,7 +4797,7 @@ "instanceGroupManagers": { "methods": { "abandonInstances": { - "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.abandonInstances", "parameterOrder": [ @@ -4465,7 +4844,7 @@ ] }, "aggregatedList": { - "description": "Retrieves the list of managed instance groups and groups them by zone.", + "description": "Retrieves the list of managed instance groups and groups them by zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceGroupManagers.aggregatedList", "parameterOrder": [ @@ -4514,7 +4893,7 @@ ] }, "delete": { - "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.instanceGroupManagers.delete", "parameterOrder": [ @@ -4558,7 +4937,7 @@ ] }, "deleteInstances": { - "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. 
This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.deleteInstances", "parameterOrder": [ @@ -4605,7 +4984,7 @@ ] }, "get": { - "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceGroupManagers.get", "parameterOrder": [ @@ -4645,7 +5024,7 @@ ] }, "insert": { - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.insert", "parameterOrder": [ @@ -4685,7 +5064,7 @@ ] }, "list": { - "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceGroupManagers.list", "parameterOrder": [ @@ -4741,7 +5120,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. 
For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -4804,7 +5183,7 @@ ] }, "patch": { - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.instanceGroupManagers.patch", "parameterOrder": [ @@ -4851,7 +5230,7 @@ ] }, "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.recreateInstances", "parameterOrder": [ @@ -4898,7 +5277,7 @@ ] }, "resize": { - "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.resize", "parameterOrder": [ @@ -4950,7 +5329,7 @@ ] }, "setInstanceTemplate": { - "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.setInstanceTemplate", "parameterOrder": [ @@ -4997,7 +5376,7 @@ ] }, "setTargetPools": { - "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroupManagers.setTargetPools", "parameterOrder": [ @@ -5048,7 +5427,7 @@ "instanceGroups": { "methods": { "addInstances": { - "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroups.addInstances", "parameterOrder": [ @@ -5095,7 +5474,7 @@ ] }, "aggregatedList": { - "description": "Retrieves the list of instance groups and sorts them by zone.", + "description": "Retrieves the list of instance groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceGroups.aggregatedList", "parameterOrder": [ @@ -5144,7 +5523,7 @@ ] }, "delete": { - "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.instanceGroups.delete", "parameterOrder": [ @@ -5188,7 +5567,7 @@ ] }, "get": { - "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceGroups.get", "parameterOrder": [ @@ -5228,7 +5607,7 @@ ] }, "insert": { - "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + "description": "Creates an instance group in the specified project using the parameters that are included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroups.insert", "parameterOrder": [ @@ -5268,7 +5647,7 @@ ] }, "list": { - "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + "description": "Retrieves the list of instance groups that are located in the specified project and zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceGroups.list", "parameterOrder": [ @@ -5324,7 +5703,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group.", + "description": "Lists the instances in the specified instance group. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroups.listInstances", "parameterOrder": [ @@ -5390,7 +5769,7 @@ ] }, "removeInstances": { - "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroups.removeInstances", "parameterOrder": [ @@ -5437,7 +5816,7 @@ ] }, "setNamedPorts": { - "description": "Sets the named ports for the specified instance group.", + "description": "Sets the named ports for the specified instance group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceGroups.setNamedPorts", "parameterOrder": [ @@ -5488,7 +5867,7 @@ "instanceTemplates": { "methods": { "delete": { - "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", + "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.instanceTemplates.delete", "parameterOrder": [ @@ -5526,7 +5905,7 @@ ] }, "get": { - "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceTemplates.get", "parameterOrder": [ @@ -5560,7 +5939,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceTemplates.getIamPolicy", "parameterOrder": [ @@ -5594,7 +5973,7 @@ ] }, "insert": { - "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceTemplates.insert", "parameterOrder": [ @@ -5627,7 +6006,7 @@ ] }, "list": { - "description": "Retrieves a list of instance templates that are contained within the specified project.", + "description": "Retrieves a list of instance templates that are contained within the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instanceTemplates.list", "parameterOrder": [ @@ -5676,7 +6055,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceTemplates.setIamPolicy", "parameterOrder": [ @@ -5712,7 +6091,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instanceTemplates.testIamPermissions", "parameterOrder": [ @@ -5753,7 +6132,7 @@ "instances": { "methods": { "addAccessConfig": { - "description": "Adds an access config to an instance's network interface.", + "description": "Adds an access config to an instance's network interface. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.addAccessConfig", "parameterOrder": [ @@ -5809,7 +6188,7 @@ ] }, "aggregatedList": { - "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", + "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instances.aggregatedList", "parameterOrder": [ @@ -5858,7 +6237,7 @@ ] }, "attachDisk": { - "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", + "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.attachDisk", "parameterOrder": [ @@ -5912,7 +6291,7 @@ ] }, "delete": { - "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.instances.delete", "parameterOrder": [ @@ -5958,7 +6337,7 @@ ] }, "deleteAccessConfig": { - "description": "Deletes an access config from an instance's network interface.", + "description": "Deletes an access config from an instance's network interface. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.deleteAccessConfig", "parameterOrder": [ @@ -6018,7 +6397,7 @@ ] }, "detachDisk": { - "description": "Detaches a disk from an instance.", + "description": "Detaches a disk from an instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.detachDisk", "parameterOrder": [ @@ -6071,7 +6450,7 @@ ] }, "get": { - "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", + "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instances.get", "parameterOrder": [ @@ -6112,8 +6491,60 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getGuestAttributes": { + "description": "Returns the specified guest attributes entry. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.instances.getGuestAttributes", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "queryPath": { + "description": "Specifies the guest attributes path to be queried.", + "location": "query", + "type": "string" + }, + "variableKey": { + "description": "Specifies the key for the guest attributes entry.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + "response": { + "$ref": "GuestAttributes" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instances.getIamPolicy", "parameterOrder": [ @@ -6155,7 +6586,7 @@ ] }, "getSerialPortOutput": { - "description": "Returns the last 1 MB of serial port output from the specified instance.", + "description": "Returns the last 1 MB of serial port output from the specified instance. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instances.getSerialPortOutput", "parameterOrder": [ @@ -6212,7 +6643,7 @@ ] }, "getShieldedInstanceIdentity": { - "description": "Returns the Shielded Instance Identity of an instance", + "description": "Returns the Shielded Instance Identity of an instance (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instances.getShieldedInstanceIdentity", "parameterOrder": [ @@ -6254,7 +6685,7 @@ ] }, "insert": { - "description": "Creates an instance resource in the specified project using the data included in the request.", + "description": "Creates an instance resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.insert", "parameterOrder": [ @@ -6300,7 +6731,7 @@ ] }, "list": { - "description": "Retrieves the list of instances contained within the specified zone.", + "description": "Retrieves the list of instances contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instances.list", "parameterOrder": [ @@ -6357,7 +6788,7 @@ ] }, "listReferrers": { - "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", + "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.instances.listReferrers", "parameterOrder": [ @@ -6422,7 +6853,7 @@ ] }, "reset": { - "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", + "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.reset", "parameterOrder": [ @@ -6468,7 +6899,7 @@ ] }, "setDeletionProtection": { - "description": "Sets deletion protection on the instance.", + "description": "Sets deletion protection on the instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setDeletionProtection", "parameterOrder": [ @@ -6520,7 +6951,7 @@ ] }, "setDiskAutoDelete": { - "description": "Sets the auto-delete flag for a disk attached to an instance.", + "description": "Sets the auto-delete flag for a disk attached to an instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setDiskAutoDelete", "parameterOrder": [ @@ -6581,7 +7012,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setIamPolicy", "parameterOrder": [ @@ -6625,7 +7056,7 @@ ] }, "setLabels": { - "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setLabels", "parameterOrder": [ @@ -6674,7 +7105,7 @@ ] }, "setMachineResources": { - "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setMachineResources", "parameterOrder": [ @@ -6723,7 +7154,7 @@ ] }, "setMachineType": { - "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + "description": "Changes the machine type for a stopped instance to the machine type specified in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setMachineType", "parameterOrder": [ @@ -6772,7 +7203,7 @@ ] }, "setMetadata": { - "description": "Sets metadata for the specified instance to the data included in the request.", + "description": "Sets metadata for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setMetadata", "parameterOrder": [ @@ -6821,7 +7252,7 @@ ] }, "setMinCpuPlatform": { - "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setMinCpuPlatform", "parameterOrder": [ @@ -6870,7 +7301,7 @@ ] }, "setScheduling": { - "description": "Sets an instance's scheduling options.", + "description": "Sets an instance's scheduling options. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setScheduling", "parameterOrder": [ @@ -6919,7 +7350,7 @@ ] }, "setServiceAccount": { - "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setServiceAccount", "parameterOrder": [ @@ -6968,7 +7399,7 @@ ] }, "setShieldedInstanceIntegrityPolicy": { - "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.instances.setShieldedInstanceIntegrityPolicy", "parameterOrder": [ @@ -7017,7 +7448,7 @@ ] }, "setTags": { - "description": "Sets network tags for the specified instance to the data included in the request.", + "description": "Sets network tags for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.setTags", "parameterOrder": [ @@ -7066,7 +7497,7 @@ ] }, "simulateMaintenanceEvent": { - "description": "Simulates a maintenance event on the instance.", + "description": "Simulates a maintenance event on the instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.simulateMaintenanceEvent", "parameterOrder": [ @@ -7107,7 +7538,7 @@ ] }, "start": { - "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.start", "parameterOrder": [ @@ -7153,7 +7584,7 @@ ] }, "startWithEncryptionKey": { - "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.startWithEncryptionKey", "parameterOrder": [ @@ -7202,7 +7633,7 @@ ] }, "stop": { - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.stop", "parameterOrder": [ @@ -7248,7 +7679,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.testIamPermissions", "parameterOrder": [ @@ -7293,7 +7724,7 @@ ] }, "updateAccessConfig": { - "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.instances.updateAccessConfig", "parameterOrder": [ @@ -7348,8 +7779,57 @@ "https://www.googleapis.com/auth/compute" ] }, + "updateDisplayDevice": { + "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PATCH", + "id": "compute.instances.updateDisplayDevice", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", + "request": { + "$ref": "DisplayDevice" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "updateNetworkInterface": { - "description": "Updates an instance's network interface. This method follows PATCH semantics.", + "description": "Updates an instance's network interface. This method follows PATCH semantics. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.instances.updateNetworkInterface", "parameterOrder": [ @@ -7405,7 +7885,7 @@ ] }, "updateShieldedInstanceConfig": { - "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.instances.updateShieldedInstanceConfig", "parameterOrder": [ @@ -7458,7 +7938,7 @@ "interconnectAttachments": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of interconnect attachments.", + "description": "Retrieves an aggregated list of interconnect attachments. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnectAttachments.aggregatedList", "parameterOrder": [ @@ -7507,7 +7987,7 @@ ] }, "delete": { - "description": "Deletes the specified interconnect attachment.", + "description": "Deletes the specified interconnect attachment. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.interconnectAttachments.delete", "parameterOrder": [ @@ -7553,7 +8033,7 @@ ] }, "get": { - "description": "Returns the specified interconnect attachment.", + "description": "Returns the specified interconnect attachment. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnectAttachments.get", "parameterOrder": [ @@ -7595,7 +8075,7 @@ ] }, "insert": { - "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + "description": "Creates an InterconnectAttachment in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.interconnectAttachments.insert", "parameterOrder": [ @@ -7636,7 +8116,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect attachments contained within the specified region.", + "description": "Retrieves the list of interconnect attachments contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnectAttachments.list", "parameterOrder": [ @@ -7693,7 +8173,7 @@ ] }, "patch": { - "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.interconnectAttachments.patch", "parameterOrder": [ @@ -7746,7 +8226,7 @@ "interconnectLocations": { "methods": { "get": { - "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnectLocations.get", "parameterOrder": [ @@ -7780,7 +8260,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect locations available to the specified project.", + "description": "Retrieves the list of interconnect locations available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnectLocations.list", "parameterOrder": [ @@ -7833,7 +8313,7 @@ "interconnects": { "methods": { "delete": { - "description": "Deletes the specified interconnect.", + "description": "Deletes the specified interconnect. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.interconnects.delete", "parameterOrder": [ @@ -7871,7 +8351,7 @@ ] }, "get": { - "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnects.get", "parameterOrder": [ @@ -7905,7 +8385,7 @@ ] }, "getDiagnostics": { - "description": "Returns the interconnectDiagnostics for the specified interconnect.", + "description": "Returns the interconnectDiagnostics for the specified interconnect. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnects.getDiagnostics", "parameterOrder": [ @@ -7939,7 +8419,7 @@ ] }, "insert": { - "description": "Creates a Interconnect in the specified project using the data included in the request.", + "description": "Creates a Interconnect in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.interconnects.insert", "parameterOrder": [ @@ -7972,7 +8452,7 @@ ] }, "list": { - "description": "Retrieves the list of interconnect available to the specified project.", + "description": "Retrieves the list of interconnect available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.interconnects.list", "parameterOrder": [ @@ -8021,7 +8501,7 @@ ] }, "patch": { - "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.interconnects.patch", "parameterOrder": [ @@ -8066,7 +8546,7 @@ "licenseCodes": { "methods": { "get": { - "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", + "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.licenseCodes.get", "parameterOrder": [ @@ -8100,7 +8580,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.licenseCodes.testIamPermissions", "parameterOrder": [ @@ -8141,7 +8621,7 @@ "licenses": { "methods": { "delete": { - "description": "Deletes the specified license.", + "description": "Deletes the specified license. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.licenses.delete", "parameterOrder": [ @@ -8179,7 +8659,7 @@ ] }, "get": { - "description": "Returns the specified License resource.", + "description": "Returns the specified License resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.licenses.get", "parameterOrder": [ @@ -8213,7 +8693,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.licenses.getIamPolicy", "parameterOrder": [ @@ -8247,7 +8727,7 @@ ] }, "insert": { - "description": "Create a License resource in the specified project.", + "description": "Create a License resource in the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.licenses.insert", "parameterOrder": [ @@ -8283,7 +8763,7 @@ ] }, "list": { - "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.licenses.list", "parameterOrder": [ @@ -8332,7 +8812,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.licenses.setIamPolicy", "parameterOrder": [ @@ -8368,7 +8848,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.licenses.testIamPermissions", "parameterOrder": [ @@ -8409,7 +8889,7 @@ "machineTypes": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of machine types.", + "description": "Retrieves an aggregated list of machine types. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.machineTypes.aggregatedList", "parameterOrder": [ @@ -8458,7 +8938,7 @@ ] }, "get": { - "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.machineTypes.get", "parameterOrder": [ @@ -8500,7 +8980,7 @@ ] }, "list": { - "description": "Retrieves a list of machine types available to the specified project.", + "description": "Retrieves a list of machine types available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.machineTypes.list", "parameterOrder": [ @@ -8561,7 +9041,7 @@ "networkEndpointGroups": { "methods": { "aggregatedList": { - "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + "description": "Retrieves the list of network endpoint groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.networkEndpointGroups.aggregatedList", "parameterOrder": [ @@ -8610,7 +9090,7 @@ ] }, "attachNetworkEndpoints": { - "description": "Attach a list of network endpoints to the specified network endpoint group.", + "description": "Attach a list of network endpoints to the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networkEndpointGroups.attachNetworkEndpoints", "parameterOrder": [ @@ -8657,7 +9137,7 @@ ] }, "delete": { - "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", + "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.networkEndpointGroups.delete", "parameterOrder": [ @@ -8701,7 +9181,7 @@ ] }, "detachNetworkEndpoints": { - "description": "Detach a list of network endpoints from the specified network endpoint group.", + "description": "Detach a list of network endpoints from the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networkEndpointGroups.detachNetworkEndpoints", "parameterOrder": [ @@ -8748,7 +9228,7 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.networkEndpointGroups.get", "parameterOrder": [ @@ -8788,7 +9268,7 @@ ] }, "insert": { - "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networkEndpointGroups.insert", "parameterOrder": [ @@ -8828,7 +9308,7 @@ ] }, "list": { - "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.networkEndpointGroups.list", "parameterOrder": [ @@ -8884,7 +9364,7 @@ ] }, "listNetworkEndpoints": { - "description": "Lists the network endpoints in the specified network endpoint group.", + "description": "Lists the network endpoints in the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networkEndpointGroups.listNetworkEndpoints", "parameterOrder": [ @@ -8950,7 +9430,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networkEndpointGroups.testIamPermissions", "parameterOrder": [ @@ -8999,7 +9479,7 @@ "networks": { "methods": { "addPeering": { - "description": "Adds a peering to the specified network.", + "description": "Adds a peering to the specified network. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networks.addPeering", "parameterOrder": [ @@ -9040,7 +9520,7 @@ ] }, "delete": { - "description": "Deletes the specified network.", + "description": "Deletes the specified network. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.networks.delete", "parameterOrder": [ @@ -9078,7 +9558,7 @@ ] }, "get": { - "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + "description": "Returns the specified network. Gets a list of available networks by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.networks.get", "parameterOrder": [ @@ -9112,7 +9592,7 @@ ] }, "insert": { - "description": "Creates a network in the specified project using the data included in the request.", + "description": "Creates a network in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networks.insert", "parameterOrder": [ @@ -9145,7 +9625,7 @@ ] }, "list": { - "description": "Retrieves the list of networks available to the specified project.", + "description": "Retrieves the list of networks available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.networks.list", "parameterOrder": [ @@ -9194,7 +9674,7 @@ ] }, "patch": { - "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", + "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.networks.patch", "parameterOrder": [ @@ -9235,7 +9715,7 @@ ] }, "removePeering": { - "description": "Removes a peering from the specified network.", + "description": "Removes a peering from the specified network. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networks.removePeering", "parameterOrder": [ @@ -9276,7 +9756,7 @@ ] }, "switchToCustomMode": { - "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + "description": "Switches the network mode from auto subnet mode to custom subnet mode. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.networks.switchToCustomMode", "parameterOrder": [ @@ -9312,13 +9792,54 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "updatePeering": { + "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PATCH", + "id": "compute.networks.updatePeering", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network resource which the updated peering is belonging to.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networks/{network}/updatePeering", + "request": { + "$ref": "NetworksUpdatePeeringRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, "nodeGroups": { "methods": { "addNodes": { - "description": "Adds specified number of nodes to the node group.", + "description": "Adds specified number of nodes to the node group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeGroups.addNodes", "parameterOrder": [ @@ -9367,7 +9888,7 @@ ] }, "aggregatedList": { - "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", + "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeGroups.aggregatedList", "parameterOrder": [ @@ -9416,7 +9937,7 @@ ] }, "delete": { - "description": "Deletes the specified NodeGroup resource.", + "description": "Deletes the specified NodeGroup resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.nodeGroups.delete", "parameterOrder": [ @@ -9462,7 +9983,7 @@ ] }, "deleteNodes": { - "description": "Deletes specified nodes from the node group.", + "description": "Deletes specified nodes from the node group. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeGroups.deleteNodes", "parameterOrder": [ @@ -9472,7 +9993,7 @@ ], "parameters": { "nodeGroup": { - "description": "Name of the NodeGroup resource to delete.", + "description": "Name of the NodeGroup resource whose nodes will be deleted.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -9511,7 +10032,7 @@ ] }, "get": { - "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", + "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeGroups.get", "parameterOrder": [ @@ -9553,7 +10074,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeGroups.getIamPolicy", "parameterOrder": [ @@ -9595,7 +10116,7 @@ ] }, "insert": { - "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", + "description": "Creates a NodeGroup resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeGroups.insert", "parameterOrder": [ @@ -9644,7 +10165,7 @@ ] }, "list": { - "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", + "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeGroups.list", "parameterOrder": [ @@ -9701,7 +10222,7 @@ ] }, "listNodes": { - "description": "Lists nodes in the node group.", + "description": "Lists nodes in the node group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeGroups.listNodes", "parameterOrder": [ @@ -9766,7 +10287,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeGroups.setIamPolicy", "parameterOrder": [ @@ -9810,7 +10331,7 @@ ] }, "setNodeTemplate": { - "description": "Updates the node template of the node group.", + "description": "Updates the node template of the node group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeGroups.setNodeTemplate", "parameterOrder": [ @@ -9859,7 +10380,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeGroups.testIamPermissions", "parameterOrder": [ @@ -9908,7 +10429,7 @@ "nodeTemplates": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of node templates.", + "description": "Retrieves an aggregated list of node templates. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeTemplates.aggregatedList", "parameterOrder": [ @@ -9957,7 +10478,7 @@ ] }, "delete": { - "description": "Deletes the specified NodeTemplate resource.", + "description": "Deletes the specified NodeTemplate resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.nodeTemplates.delete", "parameterOrder": [ @@ -10003,7 +10524,7 @@ ] }, "get": { - "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + "description": "Returns the specified node template. Gets a list of available node templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeTemplates.get", "parameterOrder": [ @@ -10045,7 +10566,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeTemplates.getIamPolicy", "parameterOrder": [ @@ -10087,7 +10608,7 @@ ] }, "insert": { - "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + "description": "Creates a NodeTemplate resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeTemplates.insert", "parameterOrder": [ @@ -10128,7 +10649,7 @@ ] }, "list": { - "description": "Retrieves a list of node templates available to the specified project.", + "description": "Retrieves a list of node templates available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeTemplates.list", "parameterOrder": [ @@ -10185,7 +10706,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeTemplates.setIamPolicy", "parameterOrder": [ @@ -10229,7 +10750,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.nodeTemplates.testIamPermissions", "parameterOrder": [ @@ -10278,7 +10799,7 @@ "nodeTypes": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of node types.", + "description": "Retrieves an aggregated list of node types. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeTypes.aggregatedList", "parameterOrder": [ @@ -10327,7 +10848,7 @@ ] }, "get": { - "description": "Returns the specified node type. 
Gets a list of available node types by making a list() request.", + "description": "Returns the specified node type. Gets a list of available node types by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeTypes.get", "parameterOrder": [ @@ -10369,7 +10890,7 @@ ] }, "list": { - "description": "Retrieves a list of node types available to the specified project.", + "description": "Retrieves a list of node types available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.nodeTypes.list", "parameterOrder": [ @@ -10430,7 +10951,7 @@ "projects": { "methods": { "disableXpnHost": { - "description": "Disable this project as a shared VPC host project.", + "description": "Disable this project as a shared VPC host project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.disableXpnHost", "parameterOrder": [ @@ -10460,7 +10981,7 @@ ] }, "disableXpnResource": { - "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + "description": "Disable a service resource (also known as service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.disableXpnResource", "parameterOrder": [ @@ -10493,7 +11014,7 @@ ] }, "enableXpnHost": { - "description": "Enable this project as a shared VPC host project.", + "description": "Enable this project as a shared VPC host project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.enableXpnHost", "parameterOrder": [ @@ -10523,7 +11044,7 @@ ] }, "enableXpnResource": { - "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.enableXpnResource", "parameterOrder": [ @@ -10556,7 +11077,7 @@ ] }, "get": { - "description": "Returns the specified Project resource.", + "description": "Returns the specified Project resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.projects.get", "parameterOrder": [ @@ -10582,7 +11103,7 @@ ] }, "getXpnHost": { - "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", + "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.projects.getXpnHost", "parameterOrder": [ @@ -10607,7 +11128,7 @@ ] }, "getXpnResources": { - "description": "Gets service resources (a.k.a service project) associated with this host project.", + "description": "Gets service resources (a.k.a service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.projects.getXpnResources", "parameterOrder": [ @@ -10655,7 +11176,7 @@ ] }, "listXpnHosts": { - "description": "Lists all shared VPC host projects visible to the user in an organization.", + "description": "Lists all shared VPC host projects visible to the user in an organization. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.listXpnHosts", "parameterOrder": [ @@ -10706,7 +11227,7 @@ ] }, "moveDisk": { - "description": "Moves a persistent disk from one zone to another.", + "description": "Moves a persistent disk from one zone to another. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.moveDisk", "parameterOrder": [ @@ -10739,7 +11260,7 @@ ] }, "moveInstance": { - "description": "Moves an instance and its attached persistent disks from one zone to another.", + "description": "Moves an instance and its attached persistent disks from one zone to another. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.moveInstance", "parameterOrder": [ @@ -10772,7 +11293,7 @@ ] }, "setCommonInstanceMetadata": { - "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "description": "Sets metadata common to all instances within the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.setCommonInstanceMetadata", "parameterOrder": [ @@ -10805,7 +11326,7 @@ ] }, "setDefaultNetworkTier": { - "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.setDefaultNetworkTier", "parameterOrder": [ @@ -10838,7 +11359,7 @@ ] }, "setUsageExportBucket": { - "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.projects.setUsageExportBucket", "parameterOrder": [ @@ -10878,7 +11399,7 @@ "regionAutoscalers": { "methods": { "delete": { - "description": "Deletes the specified autoscaler.", + "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.regionAutoscalers.delete", "parameterOrder": [ @@ -10924,7 +11445,7 @@ ] }, "get": { - "description": "Returns the specified autoscaler.", + "description": "Returns the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionAutoscalers.get", "parameterOrder": [ @@ -10966,7 +11487,7 @@ ] }, "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request.", + "description": "Creates an autoscaler in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionAutoscalers.insert", "parameterOrder": [ @@ -11007,7 +11528,7 @@ ] }, "list": { - "description": "Retrieves a list of autoscalers contained within the specified region.", + "description": "Retrieves a list of autoscalers contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionAutoscalers.list", "parameterOrder": [ @@ -11064,7 +11585,7 @@ ] }, "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.regionAutoscalers.patch", "parameterOrder": [ @@ -11111,7 +11632,7 @@ ] }, "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request.", + "description": "Updates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.regionAutoscalers.update", "parameterOrder": [ @@ -11162,7 +11683,7 @@ "regionBackendServices": { "methods": { "delete": { - "description": "Deletes the specified regional BackendService resource.", + "description": "Deletes the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.regionBackendServices.delete", "parameterOrder": [ @@ -11208,7 +11729,7 @@ ] }, "get": { - "description": "Returns the specified regional BackendService resource.", + "description": "Returns the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionBackendServices.get", "parameterOrder": [ @@ -11250,7 +11771,7 @@ ] }, "getHealth": { - "description": "Gets the most recent health check results for this regional BackendService.", + "description": "Gets the most recent health check results for this regional BackendService. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionBackendServices.getHealth", "parameterOrder": [ @@ -11294,7 +11815,7 @@ ] }, "insert": { - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionBackendServices.insert", "parameterOrder": [ @@ -11335,7 +11856,7 @@ ] }, "list": { - "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionBackendServices.list", "parameterOrder": [ @@ -11392,7 +11913,7 @@ ] }, "patch": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.regionBackendServices.patch", "parameterOrder": [ @@ -11441,7 +11962,7 @@ ] }, "update": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.regionBackendServices.update", "parameterOrder": [ @@ -11494,7 +12015,7 @@ "regionCommitments": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of commitments.", + "description": "Retrieves an aggregated list of commitments. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionCommitments.aggregatedList", "parameterOrder": [ @@ -11543,7 +12064,7 @@ ] }, "get": { - "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionCommitments.get", "parameterOrder": [ @@ -11585,7 +12106,7 @@ ] }, "insert": { - "description": "Creates a commitment in the specified project using the data included in the request.", + "description": "Creates a commitment in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionCommitments.insert", "parameterOrder": [ @@ -11626,7 +12147,7 @@ ] }, "list": { - "description": "Retrieves a list of commitments contained within the specified region.", + "description": "Retrieves a list of commitments contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionCommitments.list", "parameterOrder": [ @@ -11687,7 +12208,7 @@ "regionDiskTypes": { "methods": { "get": { - "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionDiskTypes.get", "parameterOrder": [ @@ -11729,7 +12250,7 @@ ] }, "list": { - "description": "Retrieves a list of regional disk types available to the specified project.", + "description": "Retrieves a list of regional disk types available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionDiskTypes.list", "parameterOrder": [ @@ -11789,8 +12310,57 @@ }, "regionDisks": { "methods": { + "addResourcePolicies": { + "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionDisks.addResourcePolicies", + "parameterOrder": [ + "project", + "region", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/disks/{disk}/addResourcePolicies", + "request": { + "$ref": "RegionDisksAddResourcePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "createSnapshot": { - "description": "Creates a snapshot of this regional disk.", + "description": "Creates a snapshot of this regional disk. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionDisks.createSnapshot", "parameterOrder": [ @@ -11839,7 +12409,7 @@ ] }, "delete": { - "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.regionDisks.delete", "parameterOrder": [ @@ -11884,7 +12454,7 @@ ] }, "get": { - "description": "Returns a specified regional persistent disk.", + "description": "Returns a specified regional persistent disk. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionDisks.get", "parameterOrder": [ @@ -11926,7 +12496,7 @@ ] }, "insert": { - "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + "description": "Creates a persistent regional disk in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionDisks.insert", "parameterOrder": [ @@ -11972,7 +12542,7 @@ ] }, "list": { - "description": "Retrieves the list of persistent disks contained within the specified region.", + "description": "Retrieves the list of persistent disks contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionDisks.list", "parameterOrder": [ @@ -12028,8 +12598,57 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "removeResourcePolicies": { + "description": "Removes resource policies from a regional disk. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionDisks.removeResourcePolicies", + "parameterOrder": [ + "project", + "region", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + "request": { + "$ref": "RegionDisksRemoveResourcePoliciesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "resize": { - "description": "Resizes the specified regional persistent disk.", + "description": "Resizes the specified regional persistent disk. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionDisks.resize", "parameterOrder": [ @@ -12078,7 +12697,7 @@ ] }, "setLabels": { - "description": "Sets the labels on the target regional disk.", + "description": "Sets the labels on the target regional disk. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionDisks.setLabels", "parameterOrder": [ @@ -12127,7 +12746,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionDisks.testIamPermissions", "parameterOrder": [ @@ -12173,10 +12792,298 @@ } } }, + "regionHealthChecks": { + "methods": { + "delete": { + "description": "Deletes the specified HealthCheck resource. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.regionHealthChecks.delete", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionHealthChecks.get", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "response": { + "$ref": "HealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionHealthChecks.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of HealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionHealthChecks.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks", + "response": { + "$ref": "HealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PATCH", + "id": "compute.regionHealthChecks.patch", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PUT", + "id": "compute.regionHealthChecks.update", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "regionInstanceGroupManagers": { "methods": { "abandonInstances": { - "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.abandonInstances", "parameterOrder": [ @@ -12223,7 +13130,7 @@ ] }, "delete": { - "description": "Deletes the specified managed instance group and all of the instances in that group.", + "description": "Deletes the specified managed instance group and all of the instances in that group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.regionInstanceGroupManagers.delete", "parameterOrder": [ @@ -12267,7 +13174,7 @@ ] }, "deleteInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.deleteInstances", "parameterOrder": [ @@ -12314,7 +13221,7 @@ ] }, "get": { - "description": "Returns all of the details about the specified managed instance group.", + "description": "Returns all of the details about the specified managed instance group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionInstanceGroupManagers.get", "parameterOrder": [ @@ -12354,7 +13261,7 @@ ] }, "insert": { - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.insert", "parameterOrder": [ @@ -12394,7 +13301,7 @@ ] }, "list": { - "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + "description": "Retrieves the list of managed instance groups that are contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionInstanceGroupManagers.list", "parameterOrder": [ @@ -12450,7 +13357,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. 
The list includes any current actions that the group has scheduled for its instances.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -12513,7 +13420,7 @@ ] }, "patch": { - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.regionInstanceGroupManagers.patch", "parameterOrder": [ @@ -12560,7 +13467,7 @@ ] }, "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.recreateInstances", "parameterOrder": [ @@ -12607,7 +13514,7 @@ ] }, "resize": { - "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.resize", "parameterOrder": [ @@ -12660,7 +13567,7 @@ ] }, "setInstanceTemplate": { - "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", "parameterOrder": [ @@ -12707,7 +13614,7 @@ ] }, "setTargetPools": { - "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", + "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.setTargetPools", "parameterOrder": [ @@ -12758,7 +13665,7 @@ "regionInstanceGroups": { "methods": { "get": { - "description": "Returns the specified instance group resource.", + "description": "Returns the specified instance group resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionInstanceGroups.get", "parameterOrder": [ @@ -12798,7 +13705,7 @@ ] }, "list": { - "description": "Retrieves the list of instance group resources contained within the specified region.", + "description": "Retrieves the list of instance group resources contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionInstanceGroups.list", "parameterOrder": [ @@ -12854,7 +13761,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroups.listInstances", "parameterOrder": [ @@ -12920,7 +13827,7 @@ ] }, "setNamedPorts": { - "description": "Sets the named ports for the specified regional instance group.", + "description": "Sets the named ports for the specified regional instance group. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.regionInstanceGroups.setNamedPorts", "parameterOrder": [ @@ -12971,7 +13878,7 @@ "regionOperations": { "methods": { "delete": { - "description": "Deletes the specified region-specific Operations resource.", + "description": "Deletes the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.regionOperations.delete", "parameterOrder": [ @@ -13009,7 +13916,7 @@ ] }, "get": { - "description": "Retrieves the specified region-specific Operations resource.", + "description": "Retrieves the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionOperations.get", "parameterOrder": [ @@ -13051,7 +13958,7 @@ ] }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified region.", + "description": "Retrieves a list of Operation resources contained within the specified region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regionOperations.list", "parameterOrder": [ @@ -13109,10 +14016,1059 @@ } } }, + "regionSslCertificates": { + "methods": { + "delete": { + "description": "Deletes the specified SslCertificate resource in the region. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.regionSslCertificates.delete", + "parameterOrder": [ + "project", + "region", + "sslCertificate" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionSslCertificates.get", + "parameterOrder": [ + "project", + "region", + "sslCertificate" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + "response": { + "$ref": "SslCertificate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionSslCertificates.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates", + "request": { + "$ref": "SslCertificate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionSslCertificates.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates", + "response": { + "$ref": "SslCertificateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionTargetHttpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpProxy resource. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionTargetHttpProxies.get", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "TargetHttpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionTargetHttpProxies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpProxies", + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionTargetHttpProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpProxies", + "response": { + "$ref": "TargetHttpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setUrlMap": { + "description": "Changes the URL map for TargetHttpProxy. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionTargetHttpProxies.setUrlMap", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionTargetHttpsProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpsProxy resource. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpsProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.get", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "response": { + "$ref": "TargetHttpsProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpsProxies", + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpsProxies", + "response": { + "$ref": "TargetHttpsProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setSslCertificates", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "request": { + "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUrlMap": { + "description": "Changes the URL map for TargetHttpsProxy. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setUrlMap", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy to set a URL map for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionUrlMaps": { + "methods": { + "delete": { + "description": "Deletes the specified UrlMap resource. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.regionUrlMaps.delete", + "parameterOrder": [ + "project", + "region", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps/{urlMap}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionUrlMaps.get", + "parameterOrder": [ + "project", + "region", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps/{urlMap}", + "response": { + "$ref": "UrlMap" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a UrlMap resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionUrlMaps.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.regionUrlMaps.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps", + "response": { + "$ref": "UrlMapList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PATCH", + "id": "compute.regionUrlMaps.patch", + "parameterOrder": [ + "project", + "region", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "description": "Updates the specified UrlMap resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PUT", + "id": "compute.regionUrlMaps.update", + "parameterOrder": [ + "project", + "region", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "validate": { + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.regionUrlMaps.validate", + "parameterOrder": [ + "project", + "region", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to be validated as.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps/{urlMap}/validate", + "request": { + "$ref": "RegionUrlMapsValidateRequest" + }, + "response": { + "$ref": "UrlMapsValidateResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "regions": { "methods": { "get": { - "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", + "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regions.get", "parameterOrder": [ @@ -13146,7 +15102,7 @@ ] }, "list": { - "description": "Retrieves the list of region resources available to the specified project.", + "description": "Retrieves the list of region resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.regions.list", "parameterOrder": [ @@ -13196,10 +15152,799 @@ } } }, + "reservations": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of reservations. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.reservations.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/reservations", + "response": { + "$ref": "ReservationAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified reservation. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.reservations.delete", + "parameterOrder": [ + "project", + "zone", + "reservation" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "reservation": { + "description": "Name of the reservation to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations/{reservation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves information about the specified reservation. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.reservations.get", + "parameterOrder": [ + "project", + "zone", + "reservation" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "reservation": { + "description": "Name of the reservation to retrieve.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations/{reservation}", + "response": { + "$ref": "Reservation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.reservations.getIamPolicy", + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a new reservation. For more information, read Reserving zonal resources. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.reservations.insert", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations", + "request": { + "$ref": "Reservation" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "A list of all the reservations that have been configured for the specified project in specified zone. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.reservations.list", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations", + "response": { + "$ref": "ReservationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "resize": { + "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.reservations.resize", + "parameterOrder": [ + "project", + "zone", + "reservation" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "reservation": { + "description": "Name of the reservation to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations/{reservation}/resize", + "request": { + "$ref": "ReservationsResizeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.reservations.setIamPolicy", + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations/{resource}/setIamPolicy", + "request": { + "$ref": "ZoneSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.reservations.testIamPermissions", + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "resourcePolicies": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of resource policies. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.resourcePolicies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/resourcePolicies", + "response": { + "$ref": "ResourcePolicyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified resource policy. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.resourcePolicies.delete", + "parameterOrder": [ + "project", + "region", + "resourcePolicy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resourcePolicy": { + "description": "Name of the resource policy to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves all information of the specified resource policy. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.resourcePolicies.get", + "parameterOrder": [ + "project", + "region", + "resourcePolicy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resourcePolicy": { + "description": "Name of the resource policy to retrieve.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "response": { + "$ref": "ResourcePolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.resourcePolicies.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a new resource policy. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.resourcePolicies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies", + "request": { + "$ref": "ResourcePolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "A list all the resource policies that have been configured for the specified project in specified region. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.resourcePolicies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies", + "response": { + "$ref": "ResourcePolicyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.resourcePolicies.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.resourcePolicies.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "routers": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of routers.", + "description": "Retrieves an aggregated list of routers. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.routers.aggregatedList", "parameterOrder": [ @@ -13248,7 +15993,7 @@ ] }, "delete": { - "description": "Deletes the specified Router resource.", + "description": "Deletes the specified Router resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.routers.delete", "parameterOrder": [ @@ -13294,7 +16039,7 @@ ] }, "get": { - "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", + "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.routers.get", "parameterOrder": [ @@ -13336,7 +16081,7 @@ ] }, "getNatMappingInfo": { - "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "description": "Retrieves runtime Nat mapping information of VM endpoints. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.routers.getNatMappingInfo", "parameterOrder": [ @@ -13401,7 +16146,7 @@ ] }, "getRouterStatus": { - "description": "Retrieves runtime information of the specified router.", + "description": "Retrieves runtime information of the specified router. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.routers.getRouterStatus", "parameterOrder": [ @@ -13443,7 +16188,7 @@ ] }, "insert": { - "description": "Creates a Router resource in the specified project and region using the data included in the request.", + "description": "Creates a Router resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.routers.insert", "parameterOrder": [ @@ -13484,7 +16229,7 @@ ] }, "list": { - "description": "Retrieves a list of Router resources available to the specified project.", + "description": "Retrieves a list of Router resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.routers.list", "parameterOrder": [ @@ -13541,7 +16286,7 @@ ] }, "patch": { - "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.routers.patch", "parameterOrder": [ @@ -13590,7 +16335,7 @@ ] }, "preview": { - "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.routers.preview", "parameterOrder": [ @@ -13635,7 +16380,7 @@ ] }, "update": { - "description": "Updates the specified Router resource with the data included in the request.", + "description": "Updates the specified Router resource with the data included in the request. This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.routers.update", "parameterOrder": [ @@ -13688,7 +16433,7 @@ "routes": { "methods": { "delete": { - "description": "Deletes the specified Route resource.", + "description": "Deletes the specified Route resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.routes.delete", "parameterOrder": [ @@ -13726,7 +16471,7 @@ ] }, "get": { - "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", + "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.routes.get", "parameterOrder": [ @@ -13760,7 +16505,7 @@ ] }, "insert": { - "description": "Creates a Route resource in the specified project using the data included in the request.", + "description": "Creates a Route resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.routes.insert", "parameterOrder": [ @@ -13793,7 +16538,7 @@ ] }, "list": { - "description": "Retrieves the list of Route resources available to the specified project.", + "description": "Retrieves the list of Route resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.routes.list", "parameterOrder": [ @@ -13846,7 +16591,7 @@ "securityPolicies": { "methods": { "addRule": { - "description": "Inserts a rule into a security policy.", + "description": "Inserts a rule into a security policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.securityPolicies.addRule", "parameterOrder": [ @@ -13882,7 +16627,7 @@ ] }, "delete": { - "description": "Deletes the specified policy.", + "description": "Deletes the specified policy. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.securityPolicies.delete", "parameterOrder": [ @@ -13920,7 +16665,7 @@ ] }, "get": { - "description": "List all of the ordered rules present in a single specified policy.", + "description": "List all of the ordered rules present in a single specified policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.securityPolicies.get", "parameterOrder": [ @@ -13954,7 +16699,7 @@ ] }, "getRule": { - "description": "Gets a rule at the specified priority.", + "description": "Gets a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.securityPolicies.getRule", "parameterOrder": [ @@ -13994,7 +16739,7 @@ ] }, "insert": { - "description": "Creates a new policy in the specified project using the data included in the request.", + "description": "Creates a new policy in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.securityPolicies.insert", "parameterOrder": [ @@ -14027,7 +16772,7 @@ ] }, "list": { - "description": "List all the policies that have been configured for the specified project.", + "description": "List all the policies that have been configured for the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.securityPolicies.list", "parameterOrder": [ @@ -14076,7 +16821,7 @@ ] }, "patch": { - "description": "Patches the specified policy with the data included in the request.", + "description": "Patches the specified policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.securityPolicies.patch", "parameterOrder": [ @@ -14117,7 +16862,7 @@ ] }, "patchRule": { - "description": "Patches a rule at the specified priority.", + "description": "Patches a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.securityPolicies.patchRule", "parameterOrder": [ @@ -14159,7 +16904,7 @@ ] }, "removeRule": { - "description": "Deletes a rule at the specified priority.", + "description": "Deletes a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.securityPolicies.removeRule", "parameterOrder": [ @@ -14202,7 +16947,7 @@ "snapshots": { "methods": { "delete": { - "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.snapshots.delete", "parameterOrder": [ @@ -14240,7 +16985,7 @@ ] }, "get": { - "description": "Returns the specified Snapshot resource. 
Gets a list of available snapshots by making a list() request.", + "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.snapshots.get", "parameterOrder": [ @@ -14274,7 +17019,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.snapshots.getIamPolicy", "parameterOrder": [ @@ -14308,7 +17053,7 @@ ] }, "list": { - "description": "Retrieves the list of Snapshot resources contained within the specified project.", + "description": "Retrieves the list of Snapshot resources contained within the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.snapshots.list", "parameterOrder": [ @@ -14357,7 +17102,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.snapshots.setIamPolicy", "parameterOrder": [ @@ -14393,7 +17138,7 @@ ] }, "setLabels": { - "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", + "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.snapshots.setLabels", "parameterOrder": [ @@ -14429,7 +17174,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.snapshots.testIamPermissions", "parameterOrder": [ @@ -14469,8 +17214,57 @@ }, "sslCertificates": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.sslCertificates.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/sslCertificates", + "response": { + "$ref": "SslCertificateAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { - "description": "Deletes the specified SslCertificate resource.", + "description": "Deletes the specified SslCertificate resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.sslCertificates.delete", "parameterOrder": [ @@ -14508,7 +17302,7 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", + "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.sslCertificates.get", "parameterOrder": [ @@ -14542,7 +17336,7 @@ ] }, "insert": { - "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + "description": "Creates a SslCertificate resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.sslCertificates.insert", "parameterOrder": [ @@ -14575,7 +17369,7 @@ ] }, "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project.", + "description": "Retrieves the list of SslCertificate resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.sslCertificates.list", "parameterOrder": [ @@ -14628,7 +17422,7 @@ "sslPolicies": { "methods": { "delete": { - "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.sslPolicies.delete", "parameterOrder": [ @@ -14665,7 +17459,7 @@ ] }, "get": { - "description": "Lists all of the ordered rules present in a single specified policy.", + "description": "Lists all of the ordered rules present in a single specified policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.sslPolicies.get", "parameterOrder": [ @@ -14698,7 +17492,7 @@ ] }, "insert": { - "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", + "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.sslPolicies.insert", "parameterOrder": [ @@ -14731,7 +17525,7 @@ ] }, "list": { - "description": "Lists all the SSL policies that have been configured for the specified project.", + "description": "Lists all the SSL policies that have been configured for the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.sslPolicies.list", "parameterOrder": [ @@ -14780,7 +17574,7 @@ ] }, "listAvailableFeatures": { - "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "description": "Lists all features that can be specified in the SSL policy when using custom profile. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.sslPolicies.listAvailableFeatures", "parameterOrder": [ @@ -14829,7 +17623,7 @@ ] }, "patch": { - "description": "Patches the specified SSL policy with the data included in the request.", + "description": "Patches the specified SSL policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.sslPolicies.patch", "parameterOrder": [ @@ -14873,7 +17667,7 @@ "subnetworks": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of subnetworks.", + "description": "Retrieves an aggregated list of subnetworks. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.subnetworks.aggregatedList", "parameterOrder": [ @@ -14922,7 +17716,7 @@ ] }, "delete": { - "description": "Deletes the specified subnetwork.", + "description": "Deletes the specified subnetwork. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.subnetworks.delete", "parameterOrder": [ @@ -14968,7 +17762,7 @@ ] }, "expandIpCidrRange": { - "description": "Expands the IP CIDR range of the subnetwork to a specified value.", + "description": "Expands the IP CIDR range of the subnetwork to a specified value. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.subnetworks.expandIpCidrRange", "parameterOrder": [ @@ -15017,7 +17811,7 @@ ] }, "get": { - "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", + "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.subnetworks.get", "parameterOrder": [ @@ -15059,7 +17853,7 @@ ] }, "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.subnetworks.getIamPolicy", "parameterOrder": [ @@ -15101,7 +17895,7 @@ ] }, "insert": { - "description": "Creates a subnetwork in the specified project using the data included in the request.", + "description": "Creates a subnetwork in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.subnetworks.insert", "parameterOrder": [ @@ -15142,7 +17936,7 @@ ] }, "list": { - "description": "Retrieves a list of subnetworks available to the specified project.", + "description": "Retrieves a list of subnetworks available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.subnetworks.list", "parameterOrder": [ @@ -15199,7 +17993,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of usable subnetworks.", + "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.subnetworks.listUsable", "parameterOrder": [ @@ -15248,7 +18042,7 @@ ] }, "patch": { - "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched.", + "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.subnetworks.patch", "parameterOrder": [ @@ -15257,6 +18051,12 @@ "subnetwork" ], "parameters": { + "drainTimeoutSeconds": { + "description": "The drain timeout specifies the upper bound in seconds on the amount of time allowed to drain connections from the current ACTIVE subnetwork to the current BACKUP subnetwork. The drain timeout is only applicable when the following conditions are true: - the subnetwork being patched has purpose = INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role = BACKUP - the patch request is setting the role to ACTIVE. 
Note that after this patch operation the roles of the ACTIVE and BACKUP subnetworks will be swapped.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15297,7 +18097,7 @@ ] }, "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.subnetworks.setIamPolicy", "parameterOrder": [ @@ -15341,7 +18141,7 @@ ] }, "setPrivateIpGoogleAccess": { - "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", + "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.subnetworks.setPrivateIpGoogleAccess", "parameterOrder": [ @@ -15390,7 +18190,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.subnetworks.testIamPermissions", "parameterOrder": [ @@ -15438,8 +18238,57 @@ }, "targetHttpProxies": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.targetHttpProxies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
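The new drainTimeoutSeconds query parameter on subnetworks.patch only matters when an INTERNAL_HTTPS_LOAD_BALANCER subnetwork is being promoted from BACKUP to ACTIVE, and the patch still has to carry the subnetwork's current fingerprint. A short sketch of that role swap, assuming a client regenerated from this revision; the DrainTimeoutSeconds setter and the Role field are inferred from the discovery document, and the project, region and subnetwork names are placeholders.

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	project, region, name := "my-project", "us-central1", "ilb-backup-subnet"

	// Read the subnetwork first: patch requires its current fingerprint.
	sn, err := svc.Subnetworks.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Promote the BACKUP subnetwork to ACTIVE, allowing up to 5 minutes to
	// drain connections from the currently ACTIVE subnetwork.
	op, err := svc.Subnetworks.Patch(project, region, name, &compute.Subnetwork{
		Role:        "ACTIVE",
		Fingerprint: sn.Fingerprint,
	}).DrainTimeoutSeconds(300).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// The returned region operation must still be polled to completion.
	log.Printf("patch started: operation %s", op.Name)
}
```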
(Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/targetHttpProxies", + "response": { + "$ref": "TargetHttpProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", + "description": "Deletes the specified TargetHttpProxy resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.targetHttpProxies.delete", "parameterOrder": [ @@ -15477,7 +18326,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetHttpProxies.get", "parameterOrder": [ @@ -15511,7 +18360,7 @@ ] }, "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetHttpProxies.insert", "parameterOrder": [ @@ -15544,7 +18393,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetHttpProxies.list", "parameterOrder": [ @@ -15593,7 +18442,7 @@ ] }, "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy.", + "description": "Changes the URL map for TargetHttpProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetHttpProxies.setUrlMap", "parameterOrder": [ @@ -15637,8 +18486,57 @@ }, "targetHttpsProxies": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.targetHttpsProxies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/targetHttpsProxies", + "response": { + "$ref": "TargetHttpsProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { - "description": "Deletes the specified TargetHttpsProxy resource.", + "description": "Deletes the specified TargetHttpsProxy resource. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.targetHttpsProxies.delete", "parameterOrder": [ @@ -15676,7 +18574,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetHttpsProxies.get", "parameterOrder": [ @@ -15710,7 +18608,7 @@ ] }, "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetHttpsProxies.insert", "parameterOrder": [ @@ -15743,7 +18641,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetHttpsProxies.list", "parameterOrder": [ @@ -15792,7 +18690,7 @@ ] }, "setQuicOverride": { - "description": "Sets the QUIC override policy for TargetHttpsProxy.", + "description": "Sets the QUIC override policy for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setQuicOverride", "parameterOrder": [ @@ -15832,7 +18730,7 @@ ] }, "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy.", + "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setSslCertificates", "parameterOrder": [ @@ -15873,7 +18771,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setSslPolicy", "parameterOrder": [ @@ -15913,7 +18811,7 @@ ] }, "setUrlMap": { - "description": "Changes the URL map for TargetHttpsProxy.", + "description": "Changes the URL map for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetHttpsProxies.setUrlMap", "parameterOrder": [ @@ -15958,7 +18856,7 @@ "targetInstances": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of target instances.", + "description": "Retrieves an aggregated list of target instances. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetInstances.aggregatedList", "parameterOrder": [ @@ -16007,7 +18905,7 @@ ] }, "delete": { - "description": "Deletes the specified TargetInstance resource.", + "description": "Deletes the specified TargetInstance resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.targetInstances.delete", "parameterOrder": [ @@ -16053,7 +18951,7 @@ ] }, "get": { - "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", + "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetInstances.get", "parameterOrder": [ @@ -16095,7 +18993,7 @@ ] }, "insert": { - "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetInstances.insert", "parameterOrder": [ @@ -16136,7 +19034,7 @@ ] }, "list": { - "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + "description": "Retrieves a list of TargetInstance resources available to the specified project and zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetInstances.list", "parameterOrder": [ @@ -16197,7 +19095,7 @@ "targetPools": { "methods": { "addHealthCheck": { - "description": "Adds health check URLs to a target pool.", + "description": "Adds health check URLs to a target pool. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetPools.addHealthCheck", "parameterOrder": [ @@ -16246,7 +19144,7 @@ ] }, "addInstance": { - "description": "Adds an instance to a target pool.", + "description": "Adds an instance to a target pool. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetPools.addInstance", "parameterOrder": [ @@ -16295,7 +19193,7 @@ ] }, "aggregatedList": { - "description": "Retrieves an aggregated list of target pools.", + "description": "Retrieves an aggregated list of target pools. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetPools.aggregatedList", "parameterOrder": [ @@ -16344,7 +19242,7 @@ ] }, "delete": { - "description": "Deletes the specified target pool.", + "description": "Deletes the specified target pool. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.targetPools.delete", "parameterOrder": [ @@ -16390,7 +19288,7 @@ ] }, "get": { - "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", + "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetPools.get", "parameterOrder": [ @@ -16432,7 +19330,7 @@ ] }, "getHealth": { - "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", + "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetPools.getHealth", "parameterOrder": [ @@ -16477,7 +19375,7 @@ ] }, "insert": { - "description": "Creates a target pool in the specified project and region using the data included in the request.", + "description": "Creates a target pool in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetPools.insert", "parameterOrder": [ @@ -16518,7 +19416,7 @@ ] }, "list": { - "description": "Retrieves a list of target pools available to the specified project and region.", + "description": "Retrieves a list of target pools available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetPools.list", "parameterOrder": [ @@ -16575,7 +19473,7 @@ ] }, "removeHealthCheck": { - "description": "Removes health check URL from a target pool.", + "description": "Removes health check URL from a target pool. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetPools.removeHealthCheck", "parameterOrder": [ @@ -16624,7 +19522,7 @@ ] }, "removeInstance": { - "description": "Removes instance URL from a target pool.", + "description": "Removes instance URL from a target pool. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetPools.removeInstance", "parameterOrder": [ @@ -16673,7 +19571,7 @@ ] }, "setBackup": { - "description": "Changes a backup target pool's configurations.", + "description": "Changes a backup target pool's configurations. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetPools.setBackup", "parameterOrder": [ @@ -16732,7 +19630,7 @@ "targetSslProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetSslProxy resource.", + "description": "Deletes the specified TargetSslProxy resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.targetSslProxies.delete", "parameterOrder": [ @@ -16770,7 +19668,7 @@ ] }, "get": { - "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", + "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetSslProxies.get", "parameterOrder": [ @@ -16804,7 +19702,7 @@ ] }, "insert": { - "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", + "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetSslProxies.insert", "parameterOrder": [ @@ -16837,7 +19735,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", + "description": "Retrieves the list of TargetSslProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetSslProxies.list", "parameterOrder": [ @@ -16886,7 +19784,7 @@ ] }, "setBackendService": { - "description": "Changes the BackendService for TargetSslProxy.", + "description": "Changes the BackendService for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetSslProxies.setBackendService", "parameterOrder": [ @@ -16927,7 +19825,7 @@ ] }, "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetSslProxy.", + "description": "Changes the ProxyHeaderType for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetSslProxies.setProxyHeader", "parameterOrder": [ @@ -16968,7 +19866,7 @@ ] }, "setSslCertificates": { - "description": "Changes SslCertificates for TargetSslProxy.", + "description": "Changes SslCertificates for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslCertificates", "parameterOrder": [ @@ -17009,7 +19907,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", "parameterOrder": [ @@ -17053,7 +19951,7 @@ "targetTcpProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetTcpProxy resource.", + "description": "Deletes the specified TargetTcpProxy resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.targetTcpProxies.delete", "parameterOrder": [ @@ -17091,7 +19989,7 @@ ] }, "get": { - "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetTcpProxies.get", "parameterOrder": [ @@ -17125,7 +20023,7 @@ ] }, "insert": { - "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", + "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetTcpProxies.insert", "parameterOrder": [ @@ -17158,7 +20056,7 @@ ] }, "list": { - "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", + "description": "Retrieves the list of TargetTcpProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetTcpProxies.list", "parameterOrder": [ @@ -17207,7 +20105,7 @@ ] }, "setBackendService": { - "description": "Changes the BackendService for TargetTcpProxy.", + "description": "Changes the BackendService for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetTcpProxies.setBackendService", "parameterOrder": [ @@ -17248,7 +20146,7 @@ ] }, "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetTcpProxy.", + "description": "Changes the ProxyHeaderType for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetTcpProxies.setProxyHeader", "parameterOrder": [ @@ -17293,7 +20191,7 @@ "targetVpnGateways": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of target VPN gateways.", + "description": "Retrieves an aggregated list of target VPN gateways. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetVpnGateways.aggregatedList", "parameterOrder": [ @@ -17342,7 +20240,7 @@ ] }, "delete": { - "description": "Deletes the specified target VPN gateway.", + "description": "Deletes the specified target VPN gateway. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.targetVpnGateways.delete", "parameterOrder": [ @@ -17388,7 +20286,7 @@ ] }, "get": { - "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", + "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetVpnGateways.get", "parameterOrder": [ @@ -17430,7 +20328,7 @@ ] }, "insert": { - "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + "description": "Creates a target VPN gateway in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.targetVpnGateways.insert", "parameterOrder": [ @@ -17471,7 +20369,7 @@ ] }, "list": { - "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + "description": "Retrieves a list of target VPN gateways available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.targetVpnGateways.list", "parameterOrder": [ @@ -17531,8 +20429,57 @@ }, "urlMaps": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.urlMaps.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/urlMaps", + "response": { + "$ref": "UrlMapsAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { - "description": "Deletes the specified UrlMap resource.", + "description": "Deletes the specified UrlMap resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.urlMaps.delete", "parameterOrder": [ @@ -17570,7 +20517,7 @@ ] }, "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.urlMaps.get", "parameterOrder": [ @@ -17604,7 +20551,7 @@ ] }, "insert": { - "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "description": "Creates a UrlMap resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.urlMaps.insert", "parameterOrder": [ @@ -17637,7 +20584,7 @@ ] }, "invalidateCache": { - "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.urlMaps.invalidateCache", "parameterOrder": [ @@ -17678,7 +20625,7 @@ ] }, "list": { - "description": "Retrieves the list of UrlMap resources available to the specified project.", + "description": "Retrieves the list of UrlMap resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.urlMaps.list", "parameterOrder": [ @@ -17727,7 +20674,7 @@ ] }, "patch": { - "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PATCH", "id": "compute.urlMaps.patch", "parameterOrder": [ @@ -17768,7 +20715,7 @@ ] }, "update": { - "description": "Updates the specified UrlMap resource with the data included in the request.", + "description": "Updates the specified UrlMap resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "PUT", "id": "compute.urlMaps.update", "parameterOrder": [ @@ -17809,7 +20756,7 @@ ] }, "validate": { - "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.urlMaps.validate", "parameterOrder": [ @@ -17846,10 +20793,385 @@ } } }, + "vpnGateways": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of VPN gateways. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.vpnGateways.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/vpnGateways", + "response": { + "$ref": "VpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified VPN gateway. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "DELETE", + "id": "compute.vpnGateways.delete", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.vpnGateways.get", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", + "response": { + "$ref": "VpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getStatus": { + "description": "Returns the status for the specified VPN gateway. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.vpnGateways.getStatus", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + "response": { + "$ref": "VpnGatewaysGetStatusResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a VPN gateway in the specified project and region using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.vpnGateways.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways", + "request": { + "$ref": "VpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of VPN gateways available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "GET", + "id": "compute.vpnGateways.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways", + "response": { + "$ref": "VpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.vpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "POST", + "id": "compute.vpnGateways.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "vpnTunnels": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of VPN tunnels.", + "description": "Retrieves an aggregated list of VPN tunnels. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.vpnTunnels.aggregatedList", "parameterOrder": [ @@ -17898,7 +21220,7 @@ ] }, "delete": { - "description": "Deletes the specified VpnTunnel resource.", + "description": "Deletes the specified VpnTunnel resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.vpnTunnels.delete", "parameterOrder": [ @@ -17944,7 +21266,7 @@ ] }, "get": { - "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request.", + "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.vpnTunnels.get", "parameterOrder": [ @@ -17986,7 +21308,7 @@ ] }, "insert": { - "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request. 
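The vpnGateways collection introduced above behaves like the other regional resources: mutating calls accept an optional requestId so that retries of the same logical request are ignored, and getStatus reports tunnel connectivity once the gateway exists. A rough sketch of an idempotent insert, assuming a client regenerated from this revision; the VpnGateways service and the VpnGateway fields are taken from the discovery schema rather than a verified release, and the project, network and gateway names are placeholders.

```go
package main

import (
	"context"
	"log"

	uuid "github.com/hashicorp/go-uuid"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	project, region := "my-project", "us-central1"

	// A unique, non-zero UUID; if the call is retried with the same requestId,
	// the server ignores the duplicate instead of creating a second gateway.
	reqID, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}

	op, err := svc.VpnGateways.Insert(project, region, &compute.VpnGateway{
		Name:    "ha-vpn-gw-1",
		Network: "projects/my-project/global/networks/default",
	}).RequestId(reqID).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// The operation must be polled before the gateway is usable.
	log.Printf("insert started: operation %s", op.Name)
}
```

Once the returned Operation reaches DONE, compute.vpnGateways.getStatus on the new gateway reports per-tunnel connectivity.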
(== suppress_warning http-rest-shadowed ==)", "httpMethod": "POST", "id": "compute.vpnTunnels.insert", "parameterOrder": [ @@ -18027,7 +21349,7 @@ ] }, "list": { - "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.vpnTunnels.list", "parameterOrder": [ @@ -18088,7 +21410,7 @@ "zoneOperations": { "methods": { "delete": { - "description": "Deletes the specified zone-specific Operations resource.", + "description": "Deletes the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "DELETE", "id": "compute.zoneOperations.delete", "parameterOrder": [ @@ -18126,7 +21448,7 @@ ] }, "get": { - "description": "Retrieves the specified zone-specific Operations resource.", + "description": "Retrieves the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.zoneOperations.get", "parameterOrder": [ @@ -18168,7 +21490,7 @@ ] }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified zone.", + "description": "Retrieves a list of Operation resources contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.zoneOperations.list", "parameterOrder": [ @@ -18229,7 +21551,7 @@ "zones": { "methods": { "get": { - "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", + "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.zones.get", "parameterOrder": [ @@ -18263,7 +21585,7 @@ ] }, "list": { - "description": "Retrieves the list of Zone resources available to the specified project.", + "description": "Retrieves the list of Zone resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", "httpMethod": "GET", "id": "compute.zones.list", "parameterOrder": [ @@ -18314,8 +21636,8 @@ } } }, - "revision": "20190403", - "rootUrl": "https://www.googleapis.com/", + "revision": "20191014", + "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { "description": "A specification of the type and number of accelerator cards attached to the instance.", @@ -18334,7 +21656,7 @@ "type": "object" }, "AcceleratorType": { - "description": "An Accelerator Type resource. (== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", + "description": "Represents an Accelerator Type resource.\n\nGoogle Cloud Platform provides graphics processing units (accelerators) that you can add to VM instances to improve or accelerate performance when working with intensive workloads. For more information, read GPUs on Compute Engine. (== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", "id": "AcceleratorType", "properties": { "creationTimestamp": { @@ -18708,7 +22030,7 @@ "type": "string" }, "name": { - "description": "The name of this access configuration. The default and recommended name is External NAT but you can use any arbitrary string you would like. For example, My external IP or Network Access.", + "description": "The name of this access configuration. 
The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", "type": "string" }, "natIP": { @@ -18728,11 +22050,11 @@ "type": "string" }, "publicPtrDomainName": { - "description": "The DNS domain name for the public PTR record. This field can only be set when the set_public_ptr field is enabled.", + "description": "The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled.", "type": "string" }, "setPublicPtr": { - "description": "Specifies whether a public DNS ?PTR? record should be created to map the external IP address of the instance to a DNS domain name.", + "description": "Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name.", "type": "boolean" }, "type": { @@ -18750,7 +22072,7 @@ "type": "object" }, "Address": { - "description": "A reserved address resource. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", + "description": "Represents an IP Address resource.\n\nAn address resource represents a regional internal IP address. Regional internal IP addresses are RFC 1918 addresses that come from either a primary or secondary IP range of a subnet in a VPC network. Regional external IP addresses can be assigned to GCP VM instances, Cloud VPN gateways, regional external forwarding rules for network load balancers (in either Standard or Premium Tier), and regional external forwarding rules for HTTP(S), SSL Proxy, and TCP Proxy load balancers in Standard Tier. For more information, read IP addresses.\n\nA globalAddresses resource represent a global external IP address. Global external IP addresses are IPv4 or IPv6 addresses. They can only be assigned to global forwarding rules for HTTP(S), SSL Proxy, or TCP Proxy load balancers in Premium Tier. For more information, read Global resources. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", "id": "Address", "properties": { "address": { @@ -18776,7 +22098,7 @@ "type": "string" }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "An optional description of this resource. Provide this field when you create the resource.", "type": "string" }, "id": { @@ -18785,7 +22107,7 @@ "type": "string" }, "ipVersion": { - "description": "The IP Version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.", + "description": "The IP version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.", "enum": [ "IPV4", "IPV6", @@ -18809,16 +22131,16 @@ "compute.addresses.insert" ] }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "network": { - "description": "The URL of the network in which to reserve the address. This field can only be used with INTERNAL type with VPC_PEERING purpose.", + "description": "The URL of the network in which to reserve the address. This field can only be used with INTERNAL type with the VPC_PEERING purpose.", "type": "string" }, "networkTier": { - "description": "This signifies the networking tier used for configuring this Address and can only take the following values: PREMIUM, STANDARD. Global forwarding rules can only be Premium Tier. Regional forwarding rules can be either Premium or Standard Tier. Standard Tier addresses applied to regional forwarding rules can be used with any external load balancer. Regional forwarding rules in Premium Tier can only be used with a Network load balancer.\n\nIf this field is not specified, it is assumed to be PREMIUM.", + "description": "This signifies the networking tier used for configuring this address and can only take the following values: PREMIUM or STANDARD. Global forwarding rules can only be Premium Tier. Regional forwarding rules can be either Premium or Standard Tier. Standard Tier addresses applied to regional forwarding rules can be used with any external load balancer. Regional forwarding rules in Premium Tier can only be used with a network load balancer.\n\nIf this field is not specified, it is assumed to be PREMIUM.", "enum": [ "PREMIUM", "STANDARD" @@ -18835,7 +22157,7 @@ "type": "integer" }, "purpose": { - "description": "The purpose of resource, only used with INTERNAL type.", + "description": "The purpose of this resource, which can be one of the following values: \n- `GCE_ENDPOINT` for addresses that are used by VM instances, alias IP ranges, internal load balancers, and similar resources. \n- `DNS_RESOLVER` for a DNS resolver address in a subnetwork \n- `VPC_PEERING` for addresses that are reserved for VPC peer networks. \n- `NAT_AUTO` for addresses that are external IP addresses automatically reserved for Cloud NAT.", "enum": [ "DNS_RESOLVER", "GCE_ENDPOINT", @@ -18851,7 +22173,7 @@ "type": "string" }, "region": { - "description": "[Output Only] URL of the region where the regional address resides. This field is not applicable to global addresses. You must specify this field as part of the HTTP request URL. You cannot set this field in the request body.", + "description": "[Output Only] The URL of the region where the regional address resides. This field is not applicable to global addresses. You must specify this field as part of the HTTP request URL.", "type": "string" }, "selfLink": { @@ -18873,7 +22195,7 @@ "type": "string" }, "subnetwork": { - "description": "The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER purposes.", + "description": "The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. 
This field can only be used with INTERNAL type with a GCE_ENDPOINT or DNS_RESOLVER purpose.", "type": "string" }, "users": { @@ -19209,16 +22531,89 @@ "id": "AliasIpRange", "properties": { "ipCidrRange": { - "description": "The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24).", + "description": "The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24).", "type": "string" }, "subnetworkRangeName": { - "description": "Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used.", + "description": "The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used.", "type": "string" } }, "type": "object" }, + "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk": { + "id": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk", + "properties": { + "diskSizeGb": { + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int64", + "type": "string" + }, + "interface": { + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "enum": [ + "NVME", + "SCSI" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AllocationSpecificSKUAllocationReservedInstanceProperties": { + "description": "Properties of the SKU instances being reserved.", + "id": "AllocationSpecificSKUAllocationReservedInstanceProperties", + "properties": { + "guestAccelerators": { + "description": "Specifies accelerator type and count.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, + "localSsds": { + "description": "Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.", + "items": { + "$ref": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk" + }, + "type": "array" + }, + "machineType": { + "description": "Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. 
This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.", + "type": "string" + }, + "minCpuPlatform": { + "description": "Minimum cpu platform the reservation.", + "type": "string" + } + }, + "type": "object" + }, + "AllocationSpecificSKUReservation": { + "description": "This reservation type allows to pre allocate specific instance configuration.", + "id": "AllocationSpecificSKUReservation", + "properties": { + "count": { + "description": "Specifies the number of resources that are allocated.", + "format": "int64", + "type": "string" + }, + "inUseCount": { + "description": "[Output Only] Indicates how many instances are in use.", + "format": "int64", + "type": "string" + }, + "instanceProperties": { + "$ref": "AllocationSpecificSKUAllocationReservedInstanceProperties", + "description": "The instance properties for the reservation." + } + }, + "type": "object" + }, "AttachedDisk": { "description": "An instance-attached disk resource.", "id": "AttachedDisk", @@ -19256,7 +22651,7 @@ "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." }, "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. TODO(b/131765817): Update documentation when NVME is supported.", "enum": [ "NVME", "SCSI" @@ -19292,7 +22687,7 @@ "type": "string" }, "source": { - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk.", + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk.", "type": "string" }, "type": { @@ -19323,7 +22718,7 @@ "type": "string" }, "diskSizeGb": { - "description": "Specifies the size of the disk in base-2 GB.", + "description": "Specifies the size of the disk in base-2 GB. If not specified, the disk will be the same size as the image (usually 10GB). 
If specified, the size must be equal to or larger than 10GB.", "format": "int64", "type": "string" }, @@ -19338,19 +22733,34 @@ "description": "Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks.", "type": "object" }, + "resourcePolicies": { + "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name.", + "items": { + "type": "string" + }, + "type": "array" + }, "sourceImage": { - "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family\n\n\nIf the source image is deleted later, this field will not be set.", + "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family\n\n\nIf the source image is deleted later, this field will not be set.", "type": "string" }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." + }, + "sourceSnapshot": { + "description": "The source snapshot to create this disk. 
When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with a snapshot that you created, specify the snapshot name in the following format:\nglobal/snapshots/my-backup\n\n\nIf the source snapshot is deleted later, this field will not be set.", + "type": "string" + }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot." } }, "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"fooservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:bar@gmail.com\" ] } ] } ] }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts foo@gmail.com from DATA_READ logging, and bar@gmail.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"sampleservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -19375,7 +22785,7 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting foo@gmail.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. 
Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { @@ -19385,6 +22795,10 @@ }, "type": "array" }, + "ignoreChildExemptions": { + "description": "", + "type": "boolean" + }, "logType": { "description": "The log type that this config enables.", "enum": [ @@ -19430,7 +22844,7 @@ "type": "object" }, "Autoscaler": { - "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances. (== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", + "description": "Represents an Autoscaler resource.\n\n\n\nUse autoscalers to automatically add or delete instances from a managed instance group according to your defined autoscaling policy. For more information, read Autoscaling Groups of Instances.\n\nFor zonal managed instance groups resource, use the autoscaler resource.\n\nFor regional managed instance groups, use the regionAutoscalers resource. (== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", "id": "Autoscaler", "properties": { "autoscalingPolicy": { @@ -19474,7 +22888,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the autoscaler configuration.", + "description": "[Output Only] The status of the autoscaler configuration. Current set of possible values: PENDING: Autoscaler backend hasn't read new/updated configuration DELETING: Configuration is being deleted ACTIVE: Configuration is acknowledged to be effective. Some warnings might or might not be present in the status_details field. ERROR: Configuration has errors. Actionable for users. Details are present in the status_details field. New values might be added in the future.", "enum": [ "ACTIVE", "DELETING", @@ -19739,7 +23153,7 @@ "type": "string" }, "type": { - "description": "The type of error returned.", + "description": "The type of error, warning or notice returned. Current set of possible values: ALL_INSTANCES_UNHEALTHY (WARNING): All instances in the instance group are unhealthy (not in RUNNING state). BACKEND_SERVICE_DOES_NOT_EXIST (ERROR): There is no backend service attached to the instance group. CAPPED_AT_MAX_NUM_REPLICAS (WARNING): Autoscaler recommends size bigger than maxNumReplicas. CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE (WARNING): The custom metric samples are not exported often enough to be a credible base for autoscaling. CUSTOM_METRIC_INVALID (ERROR): The custom metric that was specified does not exist or does not have the necessary labels. MIN_EQUALS_MAX (WARNING): The minNumReplicas is equal to maxNumReplicas. This means the autoscaler cannot add or remove instances from the instance group. MISSING_CUSTOM_METRIC_DATA_POINTS (WARNING): The autoscaler did not receive any data from the custom metric configured for autoscaling. 
MISSING_LOAD_BALANCING_DATA_POINTS (WARNING): The autoscaler is configured to scale based on a load balancing signal but the instance group has not received any requests from the load balancer. MODE_OFF (WARNING): Autoscaling is turned off. The number of instances in the group won't change automatically. The autoscaling configuration is preserved. MODE_ONLY_UP (WARNING): Autoscaling is in the \"Autoscale only up\" mode. Instances in the group will be only added. MORE_THAN_ONE_BACKEND_SERVICE (ERROR): The instance group cannot be autoscaled because it has more than one backend service attached to it. NOT_ENOUGH_QUOTA_AVAILABLE (ERROR): Exceeded quota for necessary resources, such as CPU, number of instances and so on. REGION_RESOURCE_STOCKOUT (ERROR): Showed only for regional autoscalers: there is a resource stockout in the chosen region. SCALING_TARGET_DOES_NOT_EXIST (ERROR): The target to be scaled does not exist. UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION (ERROR): Autoscaling does not work with an HTTP/S load balancer that has been configured for maxRate. ZONE_RESOURCE_STOCKOUT (ERROR): For zonal autoscalers: there is a resource stockout in the chosen zone. For regional autoscalers: in at least one of the zones you're using there is a resource stockout. New values might be added in the future. Some of the values might not be available in all API versions.", "enum": [ "ALL_INSTANCES_UNHEALTHY", "BACKEND_SERVICE_DOES_NOT_EXIST", @@ -19749,6 +23163,7 @@ "MIN_EQUALS_MAX", "MISSING_CUSTOM_METRIC_DATA_POINTS", "MISSING_LOAD_BALANCING_DATA_POINTS", + "MODE_OFF", "MORE_THAN_ONE_BACKEND_SERVICE", "NOT_ENOUGH_QUOTA_AVAILABLE", "REGION_RESOURCE_STOCKOUT", @@ -19772,6 +23187,7 @@ "", "", "", + "", "" ], "type": "string" @@ -19969,7 +23385,7 @@ "id": "Backend", "properties": { "balancingMode": { - "description": "Specifies the balancing mode for this backend. For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).\n\nFor Internal Load Balancing, the default and only supported mode is CONNECTION.", + "description": "Specifies the balancing mode for the backend.\n\nWhen choosing a balancing mode, you need to consider the loadBalancingScheme, and protocol for the backend service, as well as the type of backend (instance group or NEG).\n\n \n- If the load balancing mode is CONNECTION, then the load is spread based on how many concurrent connections the backend can handle.\nYou can use the CONNECTION balancing mode if the protocol for the backend service is SSL, TCP, or UDP.\n\nIf the loadBalancingScheme for the backend service is EXTERNAL (SSL Proxy and TCP Proxy load balancers), you must also specify exactly one of the following parameters: maxConnections, maxConnectionsPerInstance, or maxConnectionsPerEndpoint.\n\nIf the loadBalancingScheme for the backend service is INTERNAL (internal TCP/UDP load balancers), you cannot specify any additional parameters.\n \n- If the load balancing mode is RATE, the load is spread based on the rate of HTTP requests per second (RPS).\nYou can use the RATE balancing mode if the protocol for the backend service is HTTP or HTTPS. 
You must specify exactly one of the following parameters: maxRate, maxRatePerInstance, or maxRatePerEndpoint.\n \n- If the load balancing mode is UTILIZATION, the load is spread based on the CPU utilization of instances in an instance group.\nYou can use the UTILIZATION balancing mode if the loadBalancingScheme of the backend service is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED and the backends are instance groups. There are no restrictions on the backend service protocol.", "enum": [ "CONNECTION", "RATE", @@ -19992,21 +23408,21 @@ "type": "string" }, "group": { - "description": "The fully-qualified URL of an Instance Group or Network Endpoint Group resource. In case of instance group this defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nFor Network Endpoint Groups this defines list of endpoints. All endpoints of Network Endpoint Group must be hosted on instances located in the same zone as the Network Endpoint Group.\n\nBackend service can not contain mix of Instance Group and Network Endpoint Group backends.\n\nNote that you must specify an Instance Group or Network Endpoint Group resource using the fully-qualified URL, rather than a partial URL.\n\nWhen the BackendService has load balancing scheme INTERNAL, the instance group must be within the same region as the BackendService. Network Endpoint Groups are not supported for INTERNAL load balancing scheme.", + "description": "The fully-qualified URL of an instance group or network endpoint group (NEG) resource. The type of backend that a backend service supports depends on the backend service's loadBalancingScheme.\n\n \n- When the loadBalancingScheme for the backend service is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, the backend can be either an instance group or a NEG. The backends on the backend service must be either all instance groups or all NEGs. You cannot mix instance group and NEG backends on the same backend service. \n\n\n- When the loadBalancingScheme for the backend service is INTERNAL, the backend must be an instance group in the same region as the backend service. NEGs are not supported. \n\nYou must use the fully-qualified URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. Partial URLs are not supported.", "type": "string" }, "maxConnections": { - "description": "The max number of simultaneous connections for the group. Can be used with either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "description": "Defines a maximum target for simultaneous connections for the entire backend (instance group or NEG). If the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is CONNECTION, and backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnectionsPerInstance, or maxConnectionsPerEndpoint.\n\nNot available if the backend's balancingMode is RATE. 
If the loadBalancingScheme is INTERNAL, then maxConnections is not supported, even though the backend requires a balancing mode of CONNECTION.", "format": "int32", "type": "integer" }, "maxConnectionsPerEndpoint": { - "description": "The max number of simultaneous connections that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "description": "Defines a maximum target for simultaneous connections for an endpoint of a NEG. This is multiplied by the number of endpoints in the NEG to implicitly calculate a maximum number of target maximum simultaneous connections for the NEG. If the backend's balancingMode is CONNECTION, and the backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnections, or maxConnectionsPerInstance.\n\nNot available if the backend's balancingMode is RATE. Internal TCP/UDP load balancing does not support setting maxConnectionsPerEndpoint even though its backends require a balancing mode of CONNECTION.", "format": "int32", "type": "integer" }, "maxConnectionsPerInstance": { - "description": "The max number of simultaneous connections that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "description": "Defines a maximum target for simultaneous connections for a single VM in a backend instance group. This is multiplied by the number of instances in the instance group to implicitly calculate a target maximum number of simultaneous connections for the whole instance group. If the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is CONNECTION, and backend is attached to a backend service whose loadBalancingScheme is EXTERNAL, you must specify either this parameter, maxConnections, or maxConnectionsPerEndpoint.\n\nNot available if the backend's balancingMode is RATE. Internal TCP/UDP load balancing does not support setting maxConnectionsPerInstance even though its backends require a balancing mode of CONNECTION.", "format": "int32", "type": "integer" }, @@ -20016,17 +23432,17 @@ "type": "integer" }, "maxRatePerEndpoint": { - "description": "The max requests per second (RPS) that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "description": "Defines a maximum target for requests per second (RPS) for an endpoint of a NEG. This is multiplied by the number of endpoints in the NEG to implicitly calculate a target maximum rate for the NEG.\n\nIf the backend's balancingMode is RATE, you must specify either this parameter, maxRate, or maxRatePerInstance.\n\nNot available if the backend's balancingMode is CONNECTION.", "format": "float", "type": "number" }, "maxRatePerInstance": { - "description": "The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. 
Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "description": "Defines a maximum target for requests per second (RPS) for a single VM in a backend instance group. This is multiplied by the number of instances in the instance group to implicitly calculate a target maximum rate for the whole instance group.\n\nIf the backend's balancingMode is UTILIZATION, this is an optional parameter. If the backend's balancingMode is RATE, you must specify either this parameter, maxRate, or maxRatePerEndpoint.\n\nNot available if the backend's balancingMode is CONNECTION.", "format": "float", "type": "number" }, "maxUtilization": { - "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].\n\nThis cannot be used for internal load balancing.", + "description": "Defines the maximum average CPU utilization of a backend VM in an instance group. The valid range is [0.0, 1.0]. This is an optional parameter if the backend's balancingMode is UTILIZATION.\n\nThis parameter can be used in conjunction with maxRate, maxRatePerInstance, maxConnections, or maxConnectionsPerInstance.", "format": "float", "type": "number" } @@ -20034,7 +23450,7 @@ "type": "object" }, "BackendBucket": { - "description": "A BackendBucket resource. This resource defines a Cloud Storage bucket.", + "description": "Represents a Cloud Storage Bucket resource.\n\nThis Cloud Storage bucket resource is referenced by a URL map of a load balancer. For more information, read Backend Buckets.", "id": "BackendBucket", "properties": { "bucketName": { @@ -20211,11 +23627,11 @@ "type": "object" }, "BackendService": { - "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity. (== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", + "description": "Represents a Backend Service resource.\n\nA backend service contains configuration values for Google Cloud Platform load balancing services.\n\nFor more information, read Backend Services.\n\n(== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "description": "If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400).", "format": "int32", "type": "integer" }, @@ -20230,9 +23646,17 @@ "$ref": "BackendServiceCdnPolicy", "description": "Cloud CDN configuration for this BackendService." }, + "circuitBreakers": { + "$ref": "CircuitBreakers", + "description": "Settings controlling the volume of connections to a backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." 
+ }, "connectionDraining": { "$ref": "ConnectionDraining" }, + "consistentHash": { + "$ref": "ConsistentHashLoadBalancerSettings", + "description": "Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -20249,7 +23673,7 @@ "type": "string" }, "enableCDN": { - "description": "If true, enable Cloud CDN for this BackendService.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "description": "If true, enables Cloud CDN for the backend service. Only applicable if the loadBalancingScheme is EXTERNAL and the protocol is HTTP or HTTPS.", "type": "boolean" }, "fingerprint": { @@ -20278,13 +23702,39 @@ "type": "string" }, "loadBalancingScheme": { - "description": "Indicates whether the backend service will be used with internal or external load balancing. A backend service created for one type of load balancing cannot be used with the other. Possible values are INTERNAL and EXTERNAL.", + "description": "Specifies the load balancer type. Choose EXTERNAL for load balancers that receive traffic from external clients. Choose INTERNAL for Internal TCP/UDP Load Balancing. Choose INTERNAL_MANAGED for Internal HTTP(S) Load Balancing. Choose INTERNAL_SELF_MANAGED for Traffic Director. A backend service created for one type of load balancing cannot be used with another. For more information, refer to Choosing a load balancer.", "enum": [ "EXTERNAL", "INTERNAL", + "INTERNAL_MANAGED", + "INTERNAL_SELF_MANAGED", "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "localityLbPolicy": { + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. 
For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.", + "enum": [ + "INVALID_LB_POLICY", + "LEAST_REQUEST", + "MAGLEV", + "ORIGINAL_DESTINATION", + "RANDOM", + "RING_HASH", + "ROUND_ROBIN" + ], + "enumDescriptions": [ + "", + "", + "", + "", "", "", "" @@ -20296,17 +23746,21 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "outlierDetection": { + "$ref": "OutlierDetection", + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, "port": { - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used for internal load balancing.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used if the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing).", "format": "int32", "type": "integer" }, "portName": { - "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. Required when the load balancing scheme is EXTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. Required when the loadBalancingScheme is EXTERNAL and the backends are instance groups. The named port must be defined on each backend instance group. This parameter has no meaning if the backends are NEGs.\n\n\n\nMust be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Blaancing).", "type": "string" }, "protocol": { - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP, depending on the chosen load balancer or Traffic Director configuration. Refer to the documentation for the load balancer or for Traffic Director for more information.", "enum": [ "HTTP", "HTTP2", @@ -20338,12 +23792,14 @@ "type": "string" }, "sessionAffinity": { - "description": "Type of session affinity to use. The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", + "description": "Type of session affinity to use. The default is NONE. Session affinity is not applicable if the --protocol is UDP.\n\nWhen the loadBalancingScheme is EXTERNAL, possible values are NONE, CLIENT_IP, or GENERATED_COOKIE. 
You can use GENERATED_COOKIE if the protocol is HTTP or HTTPS.\n\nWhen the loadBalancingScheme is INTERNAL, possible values are NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the loadBalancingScheme is INTERNAL_SELF_MANAGED, possible values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or HTTP_COOKIE.", "enum": [ "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", + "HEADER_FIELD", + "HTTP_COOKIE", "NONE" ], "enumDescriptions": [ @@ -20351,12 +23807,14 @@ "", "", "", + "", + "", "" ], "type": "string" }, "timeoutSec": { - "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.", + "description": "The backend service timeout has a different meaning depending on the type of load balancer. For more information read, Backend service settings The default is 30 seconds.", "format": "int32", "type": "integer" } @@ -20761,7 +24219,7 @@ "description": "The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.", "items": { "type": "string" }, @@ -20820,8 +24278,40 @@ }, "type": "object" }, + "CircuitBreakers": { + "description": "Settings controlling the volume of connections to a backend service.", + "id": "CircuitBreakers", + "properties": { + "maxConnections": { + "description": "The maximum number of connections to the backend service. If not specified, there is no limit.", + "format": "int32", + "type": "integer" + }, + "maxPendingRequests": { + "description": "The maximum number of pending requests allowed to the backend service. 
If not specified, there is no limit.", + "format": "int32", + "type": "integer" + }, + "maxRequests": { + "description": "The maximum number of parallel requests that allowed to the backend service. If not specified, there is no limit.", + "format": "int32", + "type": "integer" + }, + "maxRequestsPerConnection": { + "description": "Maximum requests for a single connection to the backend service. This parameter is respected by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there is no limit. Setting this parameter to 1 will effectively disable keep alive.", + "format": "int32", + "type": "integer" + }, + "maxRetries": { + "description": "The maximum number of parallel retries allowed to the backend cluster. If not specified, the default is 1.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Commitment": { - "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. (== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", + "description": "Represents a regional Commitment resource.\n\nCreating a commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts. (== resource_for beta.regionCommitments ==) (== resource_for v1.regionCommitments ==)", "id": "Commitment", "properties": { "creationTimestamp": { @@ -20869,6 +24359,13 @@ "description": "[Output Only] URL of the region where this commitment may be used.", "type": "string" }, + "reservations": { + "description": "List of reservations in this commitment.", + "items": { + "$ref": "Reservation" + }, + "type": "array" + }, "resources": { "description": "A list of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.", "items": { @@ -21308,7 +24805,101 @@ "id": "ConnectionDraining", "properties": { "drainingTimeoutSec": { - "description": "Time for which instance will be drained (not accept new connections, but still work to finish started).", + "description": "The amount of time in seconds to allow existing connections to persist while on unhealthy backend VMs. Only applicable if the protocol is not UDP. The valid range is [0, 3600].", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ConsistentHashLoadBalancerSettings": { + "description": "This message defines settings for a consistent hash style load balancer.", + "id": "ConsistentHashLoadBalancerSettings", + "properties": { + "httpCookie": { + "$ref": "ConsistentHashLoadBalancerSettingsHttpCookie", + "description": "Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE." 
+ }, + "httpHeaderName": { + "description": "The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD.", + "type": "string" + }, + "minimumRingSize": { + "description": "The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ConsistentHashLoadBalancerSettingsHttpCookie": { + "description": "The information about the HTTP Cookie on which the hash function is based for load balancing policies that use a consistent hash.", + "id": "ConsistentHashLoadBalancerSettingsHttpCookie", + "properties": { + "name": { + "description": "Name of the cookie.", + "type": "string" + }, + "path": { + "description": "Path to set for the cookie.", + "type": "string" + }, + "ttl": { + "$ref": "Duration", + "description": "Lifetime of the cookie." + } + }, + "type": "object" + }, + "CorsPolicy": { + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing", + "id": "CorsPolicy", + "properties": { + "allowCredentials": { + "description": "In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This translates to the Access-Control-Allow-Credentials header.\nDefault is false.", + "type": "boolean" + }, + "allowHeaders": { + "description": "Specifies the content for the Access-Control-Allow-Headers header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowMethods": { + "description": "Specifies the content for the Access-Control-Allow-Methods header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowOriginRegexes": { + "description": "Specifies the regualar expression patterns that match allowed origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nAn origin is allowed if it matches either allow_origins or allow_origin_regex.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowOrigins": { + "description": "Specifies the list of origins that will be allowed to do CORS requests.\nAn origin is allowed if it matches either allow_origins or allow_origin_regex.", + "items": { + "type": "string" + }, + "type": "array" + }, + "disabled": { + "description": "If true, specifies the CORS policy is disabled. The default value of false, which indicates that the CORS policy is in effect.", + "type": "boolean" + }, + "exposeHeaders": { + "description": "Specifies the content for the Access-Control-Expose-Headers header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "maxAge": { + "description": "Specifies how long the results of a preflight request can be cached. This translates to the content for the Access-Control-Max-Age header.", "format": "int32", "type": "integer" } @@ -21388,7 +24979,7 @@ "type": "object" }, "Disk": { - "description": "A Disk resource. (== resource_for beta.disks ==) (== resource_for v1.disks ==)", + "description": "Represents a Persistent Disk resource.\n\nPersistent disks are required for running your VM instances. Create both boot and non-boot (data) persistent disks. For more information, read Persistent Disks. 
For more storage options, read Storage options.\n\nThe disks resource represents a zonal persistent disk. For more information, read Zonal persistent disks.\n\nThe regionDisks resource represents a regional persistent disk. For more information, read Regional resources. (== resource_for beta.disks ==) (== resource_for v1.disks ==) (== resource_for v1.regionDisks ==) (== resource_for beta.regionDisks ==)", "id": "Disk", "properties": { "creationTimestamp": { @@ -21401,7 +24992,7 @@ }, "diskEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." + "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", @@ -21485,6 +25076,13 @@ }, "type": "array" }, + "resourcePolicies": { + "description": "Resource policies applied to this disk for automatic snapshot creations.", + "items": { + "type": "string" + }, + "type": "array" + }, "selfLink": { "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" @@ -21519,7 +25117,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of disk creation.", + "description": "[Output Only] The status of disk creation. CREATING: Disk is provisioning. RESTORING: Source data is being copied into the disk. FAILED: Disk creation failed. READY: Disk is ready for use. DELETING: Disk is deleting.", "enum": [ "CREATING", "DELETING", @@ -21537,11 +25135,11 @@ "type": "string" }, "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: project/zones/zone/diskTypes/pd-standard or pd-ssd", + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. 
For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", "type": "string" }, "users": { - "description": "[Output Only] Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance", + "description": "[Output Only] Links to the users of the disk (attached instances) in form: projects/project/zones/zone/instances/instance", "items": { "type": "string" }, @@ -21834,7 +25432,7 @@ "type": "object" }, "DiskType": { - "description": "A DiskType resource. (== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==)", + "description": "Represents a Disk Type resource.\n\nYou can choose from a variety of disk types based on your needs. For more information, read Storage options.\n\nThe diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks.\n\nThe regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks. (== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==) (== resource_for v1.regionDiskTypes ==) (== resource_for beta.regionDiskTypes ==)", "id": "DiskType", "properties": { "creationTimestamp": { @@ -22206,6 +25804,32 @@ }, "type": "object" }, + "DisksAddResourcePoliciesRequest": { + "id": "DisksAddResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be added to this disk. Currently you can only specify one policy here.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "DisksRemoveResourcePoliciesRequest": { + "id": "DisksRemoveResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be removed from this disk.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "DisksResizeRequest": { "id": "DisksResizeRequest", "properties": { @@ -22311,6 +25935,17 @@ }, "type": "object" }, + "DisplayDevice": { + "description": "A set of Display Device options", + "id": "DisplayDevice", + "properties": { + "enableDisplay": { + "description": "Defines whether the instance has Display enabled.", + "type": "boolean" + } + }, + "type": "object" + }, "DistributionPolicy": { "id": "DistributionPolicy", "properties": { @@ -22339,6 +25974,23 @@ }, "type": "object" }, + "Duration": { + "description": "A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like \"day\" or \"month\". Range is approximately 10,000 years.", + "id": "Duration", + "properties": { + "nanos": { + "description": "Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive.", + "format": "int32", + "type": "integer" + }, + "seconds": { + "description": "Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "Expr": { "description": "Represents an expression text. 
Example:\n\ntitle: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", "id": "Expr", @@ -22362,8 +26014,211 @@ }, "type": "object" }, + "ExternalVpnGateway": { + "description": "External VPN gateway is the on-premises VPN gateway(s) or another cloud provider's VPN gateway that connects to your Google Cloud VPN gateway. To create a highly available VPN from Google Cloud to your on-premises side or another Cloud provider's VPN gateway, you must create an external VPN gateway resource in GCP, which provides the information to GCP about your external VPN gateway.", + "id": "ExternalVpnGateway", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "interfaces": { + "description": "List of interfaces for this external VPN gateway.", + "items": { + "$ref": "ExternalVpnGatewayInterface" + }, + "type": "array" + }, + "kind": { + "default": "compute#externalVpnGateway", + "description": "[Output Only] Type of the resource. Always compute#externalVpnGateway for externalVpnGateways.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this ExternalVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an ExternalVpnGateway.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this ExternalVpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "name": { + "annotations": { + "required": [ + "compute.externalVpnGateways.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "redundancyType": { + "description": "Indicates the user-supplied redundancy type of this external VPN gateway.", + "enum": [ + "FOUR_IPS_REDUNDANCY", + "SINGLE_IP_INTERNALLY_REDUNDANT", + "TWO_IPS_REDUNDANCY" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "ExternalVpnGatewayInterface": { + "description": "The interface for the external VPN gateway.", + "id": "ExternalVpnGatewayInterface", + "properties": { + "id": { + "description": "The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 FOUR_IPS_REDUNDANCY - 0, 1, 2, 3", + "format": "uint32", + "type": "integer" + }, + "ipAddress": { + "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine.", + "type": "string" + } + }, + "type": "object" + }, + "ExternalVpnGatewayList": { + "description": "Response to the list request, and contains a list of externalVpnGateways.", + "id": "ExternalVpnGatewayList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of ExternalVpnGateway resources.", + "items": { + "$ref": "ExternalVpnGateway" + }, + "type": "array" + }, + "kind": { + "default": "compute#externalVpnGatewayList", + "description": "[Output Only] Type of resource. Always compute#externalVpnGatewayList for lists of externalVpnGateways.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "Firewall": { - "description": "Represents a Firewall resource.", + "description": "Represents a Firewall Rule resource.\n\nFirewall rules allow or deny ingress traffic to, and egress traffic from your instances. For more information, read Firewall rules.", "id": "Firewall", "properties": { "allowed": { @@ -22371,11 +26226,11 @@ "items": { "properties": { "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number.", "type": "string" }, "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "description": "An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. 
If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", "items": { "type": "string" }, @@ -22395,11 +26250,11 @@ "items": { "properties": { "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number.", "type": "string" }, "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "description": "An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", "items": { "type": "string" }, @@ -22411,18 +26266,18 @@ "type": "array" }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "An optional description of this resource. Provide this field when you create the resource.", "type": "string" }, "destinationRanges": { - "description": "If destination ranges are specified, the firewall will apply only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", + "description": "If destination ranges are specified, the firewall rule applies only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", "items": { "type": "string" }, "type": "array" }, "direction": { - "description": "Direction of traffic to which this firewall applies; default is INGRESS. Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.", + "description": "Direction of traffic to which this firewall applies, either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` traffic, you cannot specify the destinationRanges field, and for `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags fields.", "enum": [ "EGRESS", "INGRESS" @@ -22434,7 +26289,7 @@ "type": "string" }, "disabled": { - "description": "Denotes whether the firewall rule is disabled, i.e not applied to the network it is associated with. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled.", + "description": "Denotes whether the firewall rule is disabled. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. 
If this is unspecified, the firewall rule will be enabled.", "type": "boolean" }, "id": { @@ -22458,16 +26313,16 @@ "compute.firewalls.patch" ] }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "network": { - "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default", + "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this field, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default", "type": "string" }, "priority": { - "description": "Priority for this rule. This is an integer between 0 and 65535, both inclusive. When not specified, the value assumed is 1000. Relative priorities determine precedence of conflicting rules. Lower value of priority implies higher precedence (eg, a rule with priority 0 has higher precedence than a rule with priority 1). DENY rules take precedence over ALLOW rules having equal priority.", + "description": "Priority for this rule. This is an integer between `0` and `65535`, both inclusive. The default value is `1000`. Relative priorities determine which rule takes effect if multiple rules apply. Lower values indicate higher priority. For example, a rule with priority `0` has higher precedence than a rule with priority `1`. DENY rules take precedence over ALLOW rules if they have equal priority. Note that VPC networks have implied rules with a priority of `65535`. To avoid conflicts with the implied rules, use a priority number less than `65535`.", "format": "int32", "type": "integer" }, @@ -22476,21 +26331,21 @@ "type": "string" }, "sourceRanges": { - "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. 
If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply. Only IPv4 is supported.", + "description": "If source ranges are specified, the firewall rule applies only to traffic that has a source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both fields are set, the rule applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the rule to apply. Only IPv4 is supported.", "items": { "type": "string" }, "type": "array" }, "sourceServiceAccounts": { - "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", + "description": "If source service accounts are specified, the firewall rules apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall applies to traffic that has a source IP address within the sourceRanges OR a source IP that belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both fields for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", "items": { "type": "string" }, "type": "array" }, "sourceTags": { - "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", + "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. 
Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both fields are set, the firewall applies to traffic that has a source IP address within sourceRanges OR a source IP from a resource with a matching tag listed in the sourceTags field. The connection does not need to match both fields for the firewall to apply.", "items": { "type": "string" }, @@ -22659,15 +26514,15 @@ "type": "object" }, "ForwardingRule": { - "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", + "description": "Represents a Forwarding Rule resource.\n\nA forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway).\n\nFor more information, read Forwarding rule concepts and Using protocol forwarding.\n\n(== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", "id": "ForwardingRule", "properties": { "IPAddress": { - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nAddresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL or INTERNAL) and scope (global or regional).\n\nWhen the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, and for regional forwarding rules, the address must live in the same region as the forwarding rule. If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL_SELF_MANAGED, this must be a URL reference to an existing Address resource ( internal regional static IP address), with a purpose of GCE_END_POINT and address_type of INTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule.\n\nAn address can be specified either by a literal IP address or a URL reference to an existing Address resource. The following examples are all valid: \n- 100.1.2.3 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address \n- projects/project/regions/region/addresses/address \n- regions/region/addresses/address \n- global/addresses/address \n- address", + "description": "IP address that this forwarding rule serves. 
When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", "type": "string" }, "IPProtocol": { - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL, only TCP and UDP are valid. When the load balancing scheme is INTERNAL_SELF_MANAGED, only TCPis valid.", + "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nFor Internal TCP/UDP Load Balancing, the load balancing scheme is INTERNAL, and one of TCP or UDP are valid. For Traffic Director, the load balancing scheme is INTERNAL_SELF_MANAGED, and only TCPis valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is INTERNAL_MANAGED, and only TCP is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is EXTERNAL and only TCP is valid. For Network TCP/UDP Load Balancing, the load balancing scheme is EXTERNAL, and one of TCP or UDP is valid.", "enum": [ "AH", "ESP", @@ -22727,19 +26582,30 @@ "type": "string" }, "loadBalancingScheme": { - "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL, INTERNAL_SELF_MANAGED, EXTERNAL. The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of INTERNAL_SELF_MANAGED means that this will be used for Internal Global HTTP(S) LB. The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)", + "description": "Specifies the forwarding rule type. EXTERNAL is used for: - Classic Cloud VPN gateways - Protocol forwarding to VMs from an external IP address - The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP.\n\nINTERNAL is used for: - Protocol forwarding to VMs from an internal IP address - Internal TCP/UDP load balancers\n\nINTERNAL_MANAGED is used for: - Internal HTTP(S) load balancers\n\nINTERNAL_SELF_MANAGED is used for: - Traffic Director\n\nFor more information about forwarding rules, refer to Forwarding rule concepts.", "enum": [ "EXTERNAL", "INTERNAL", + "INTERNAL_MANAGED", + "INTERNAL_SELF_MANAGED", "INVALID" ], "enumDescriptions": [ + "", + "", "", "", "" ], "type": "string" }, + "metadataFilters": { + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. 
If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can be overridden by those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "items": { + "$ref": "MetadataFilter" + }, + "type": "array" + }, "name": { "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -22762,11 +26628,11 @@ "type": "string" }, "portRange": { - "description": "This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", + "description": "This field is deprecated. See the port\nfield.", "type": "string" }, "ports": { - "description": "This field is used along with the backend_service field for internal load balancing.\n\nWhen the load balancing scheme is INTERNAL, a list of ports can be configured, for example, ['80'], ['8000','9000'] etc. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule.\n\nYou may specify a maximum of up to 5 ports.", + "description": "List of comma-separated ports. The forwarding rule forwards packets with matching destination ports. If the forwarding rule's loadBalancingScheme is EXTERNAL, and the forwarding rule references a target pool, specifying ports is optional. You can specify an unlimited number of ports, but they must be contiguous. 
If you omit ports, GCP forwards traffic on any port of the forwarding rule's protocol.\n\nIf the forwarding rule's loadBalancingScheme is EXTERNAL, and the forwarding rule references a target HTTP proxy, target HTTPS proxy, target TCP proxy, target SSL proxy, or target VPN gateway, you must specify ports using the following constraints:\n\n \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500 \n\nIf the forwarding rule's loadBalancingScheme is INTERNAL, you must specify ports in one of the following ways:\n\n* A list of up to five ports, which can be non-contiguous * Keyword ALL, which causes the forwarding rule to forward traffic on any port of the forwarding rule's protocol.\n\nThe ports field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.", "items": { "type": "string" }, @@ -22781,7 +26647,7 @@ "type": "string" }, "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, will be the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", + "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -23167,6 +27033,70 @@ }, "type": "object" }, + "GuestAttributes": { + "description": "A guest attributes entry.", + "id": "GuestAttributes", + "properties": { + "kind": { + "default": "compute#guestAttributes", + "description": "[Output Only] Type of the resource. Always compute#guestAttributes for guest attributes entry.", + "type": "string" + }, + "queryPath": { + "description": "The path to be queried. This can be the default namespace ('/') or a nested namespace ('//') or a specified key ('//')", + "type": "string" + }, + "queryValue": { + "$ref": "GuestAttributesValue", + "description": "[Output Only] The value of the requested queried path." 
+ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "variableKey": { + "description": "The key to search for.", + "type": "string" + }, + "variableValue": { + "description": "[Output Only] The value found for the requested key.", + "type": "string" + } + }, + "type": "object" + }, + "GuestAttributesEntry": { + "description": "A guest attributes namespace/key/value entry.", + "id": "GuestAttributesEntry", + "properties": { + "key": { + "description": "Key for the guest attribute entry.", + "type": "string" + }, + "namespace": { + "description": "Namespace for the guest attribute entry.", + "type": "string" + }, + "value": { + "description": "Value for the guest attribute entry.", + "type": "string" + } + }, + "type": "object" + }, + "GuestAttributesValue": { + "description": "Array of guest attribute namespace/key/value tuples.", + "id": "GuestAttributesValue", + "properties": { + "items": { + "items": { + "$ref": "GuestAttributesEntry" + }, + "type": "array" + } + }, + "type": "object" + }, "GuestOsFeature": { "description": "Guest OS features.", "id": "GuestOsFeature", @@ -23354,7 +27284,7 @@ "type": "object" }, "HealthCheck": { - "description": "An HealthCheck resource. This resource defines a template for how individual virtual machines should be checked for health, via one of the supported protocols.", + "description": "Represents a Health Check resource.\n\nHealth checks are used for most GCP load balancers and managed instance group auto-healing. For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -23399,6 +27329,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "region": { + "description": "[Output Only] Region where the health check resides. Not applicable to global health checks.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -23564,6 +27498,212 @@ }, "type": "object" }, + "HealthChecksAggregatedList": { + "id": "HealthChecksAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "HealthChecksScopedList", + "description": "Name of the scope containing this set of HealthChecks." + }, + "description": "A list of HealthChecksScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#healthChecksAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "HealthChecksScopedList": { + "id": "HealthChecksScopedList", + "properties": { + "healthChecks": { + "description": "A list of HealthChecks contained in this scope.", + "items": { + "$ref": "HealthCheck" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "HealthStatus": { "id": "HealthStatus", "properties": { @@ -23651,8 +27791,149 @@ }, "type": "object" }, + "HttpFaultAbort": { + "description": "Specification for how requests are aborted as part of fault injection.", + "id": "HttpFaultAbort", + "properties": { + "httpStatus": { + "description": "The HTTP status code used to abort the request.\nThe value must be between 200 and 599 inclusive.", + "format": "uint32", + "type": "integer" + }, + "percentage": { + "description": "The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection.\nThe value must be between 0.0 and 100.0 inclusive.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "HttpFaultDelay": { + "description": "Specifies the delay introduced by Loadbalancer before forwarding the request to the backend service as part of fault injection.", + "id": "HttpFaultDelay", + "properties": { + "fixedDelay": { + "$ref": "Duration", + "description": "Specifies the value of the fixed delay interval." 
+ }, + "percentage": { + "description": "The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection.\nThe value must be between 0.0 and 100.0 inclusive.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "HttpFaultInjection": { + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.", + "id": "HttpFaultInjection", + "properties": { + "abort": { + "$ref": "HttpFaultAbort", + "description": "The specification for how client requests are aborted as part of fault injection." + }, + "delay": { + "$ref": "HttpFaultDelay", + "description": "The specification for how client requests are delayed as part of fault injection, before being sent to a backend service." + } + }, + "type": "object" + }, + "HttpHeaderAction": { + "description": "The request and response header transformations that take effect before the request is passed along to the selected backendService.", + "id": "HttpHeaderAction", + "properties": { + "requestHeadersToAdd": { + "description": "Headers to add to a matching request prior to forwarding the request to the backendService.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" + }, + "requestHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the request prior to forwarding the request to the backendService.", + "items": { + "type": "string" + }, + "type": "array" + }, + "responseHeadersToAdd": { + "description": "Headers to add the response prior to sending the response back to the client.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" + }, + "responseHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the response prior to sending the response back to the client.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpHeaderMatch": { + "description": "matchRule criteria for request header matches.", + "id": "HttpHeaderMatch", + "properties": { + "exactMatch": { + "description": "The value should exactly match contents of exactMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "headerName": { + "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".", + "type": "string" + }, + "invertMatch": { + "description": "If set to false, the headerMatch is considered a match if the match criteria above are met. If set to true, the headerMatch is considered a match if the match criteria above are NOT met.\nThe default setting is false.", + "type": "boolean" + }, + "prefixMatch": { + "description": "The value of the header must start with the contents of prefixMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "presentMatch": { + "description": "A header with the contents of headerName must exist. 
The match takes place whether or not the request's header has a value or not.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "boolean" + }, + "rangeMatch": { + "$ref": "Int64RangeMatch", + "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails.\nFor example for a range [-5, 0] \n- -3 will match. \n- 0 will not match. \n- 0.25 will not match. \n- -3someString will not match. \nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set." + }, + "regexMatch": { + "description": "The value of the header must match the regualar expression specified in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript \nFor matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "suffixMatch": { + "description": "The value of the header must end with the contents of suffixMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + } + }, + "type": "object" + }, + "HttpHeaderOption": { + "description": "Specification determining how headers are added to requests or responses.", + "id": "HttpHeaderOption", + "properties": { + "headerName": { + "description": "The name of the header.", + "type": "string" + }, + "headerValue": { + "description": "The value of the header to add.", + "type": "string" + }, + "replace": { + "description": "If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header.\nThe default value is false.", + "type": "boolean" + } + }, + "type": "object" + }, "HttpHealthCheck": { - "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", + "description": "Represents a legacy HTTP Health Check resource.\n\nLegacy health checks are required by network load balancers. For more information, read Health Check Concepts.", "id": "HttpHealthCheck", "properties": { "checkIntervalSec": { @@ -23698,7 +27979,7 @@ "type": "integer" }, "requestPath": { - "description": "The request path of the HTTP health check request. The default value is /.", + "description": "The request path of the HTTP health check request. The default value is /. This field does not support query parameters.", "type": "string" }, "selfLink": { @@ -23830,8 +28111,218 @@ }, "type": "object" }, + "HttpQueryParameterMatch": { + "description": "HttpRouteRuleMatch criteria for a request's query parameter.", + "id": "HttpQueryParameterMatch", + "properties": { + "exactMatch": { + "description": "The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "string" + }, + "name": { + "description": "The name of the query parameter to match. 
The query parameter must exist in the request, in the absence of which the request match fails.", + "type": "string" + }, + "presentMatch": { + "description": "Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "boolean" + }, + "regexMatch": { + "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "string" + } + }, + "type": "object" + }, + "HttpRedirectAction": { + "description": "Specifies settings for an HTTP redirect.", + "id": "HttpRedirectAction", + "properties": { + "hostRedirect": { + "description": "The host that will be used in the redirect response instead of the one that was supplied in the request.\nThe value must be between 1 and 255 characters.", + "type": "string" + }, + "httpsRedirect": { + "description": "If set to true, the URL scheme in the redirected request is set to https. If set to false, the URL scheme of the redirected request will remain the same as that of the request.\nThis must only be set for UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted.\nThe default is set to false.", + "type": "boolean" + }, + "pathRedirect": { + "description": "The path that will be used in the redirect response instead of the one that was supplied in the request.\nOnly one of pathRedirect or prefixRedirect must be specified.\nThe value must be between 1 and 1024 characters.", + "type": "string" + }, + "prefixRedirect": { + "description": "The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request.", + "type": "string" + }, + "redirectResponseCode": { + "description": "The HTTP Status code to use for this RedirectAction.\nSupported values are: \n- MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. \n- FOUND, which corresponds to 302. \n- SEE_OTHER which corresponds to 303. \n- TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method will be retained. \n- PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method will be retained.", + "enum": [ + "FOUND", + "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", + "SEE_OTHER", + "TEMPORARY_REDIRECT" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "stripQuery": { + "description": "If set to true, any accompanying query portion of the original URL is removed prior to redirecting the request. If set to false, the query portion of the original URL is retained.\nThe default is set to false.", + "type": "boolean" + } + }, + "type": "object" + }, + "HttpRetryPolicy": { + "description": "The retry policy associates with HttpRouteRule", + "id": "HttpRetryPolicy", + "properties": { + "numRetries": { + "description": "Specifies the allowed number retries. This number must be \u003e 0. If not specified, defaults to 1.", + "format": "uint32", + "type": "integer" + }, + "perTryTimeout": { + "$ref": "Duration", + "description": "Specifies a non-zero timeout per retry attempt.\nIf not specified, will use the timeout set in HttpRouteAction. 
If timeout in HttpRouteAction is not set, will use the largest timeout among all backend services associated with the route." + }, + "retryConditions": { + "description": "Specfies one or more conditions when this retry rule applies. Valid values are: \n- 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, example: disconnects, reset, read timeout, connection failure, and refused streams. \n- gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504.\n- \n- connect-failure: Loadbalancer will retry on failures connecting to backend services, for example due to connection timeouts. \n- retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. Currently the only retriable error supported is 409. \n- refused-stream:Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. \n- cancelledLoadbalancer will retry if the gRPC status code in the response header is set to cancelled \n- deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded \n- resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted \n- unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpRouteAction": { + "id": "HttpRouteAction", + "properties": { + "corsPolicy": { + "$ref": "CorsPolicy", + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing" + }, + "faultInjectionPolicy": { + "$ref": "HttpFaultInjection", + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy." + }, + "requestMirrorPolicy": { + "$ref": "RequestMirrorPolicy", + "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow." + }, + "retryPolicy": { + "$ref": "HttpRetryPolicy", + "description": "Specifies the retry policy associated with this route." + }, + "timeout": { + "$ref": "Duration", + "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, will use the largest timeout among all backend services associated with the route." 
+ }, + "urlRewrite": { + "$ref": "UrlRewrite", + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service" + }, + "weightedBackendServices": { + "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non 0 number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like Url rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", + "items": { + "$ref": "WeightedBackendService" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpRouteRule": { + "description": "An HttpRouteRule specifies how to match an HTTP request and the corresponding routing action that load balancing proxies will perform.", + "id": "HttpRouteRule", + "properties": { + "description": { + "description": "The short description conveying the intent of this routeRule.\nThe description can have a maximum length of 1024 characters.", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction" + }, + "matchRules": { + "items": { + "$ref": "HttpRouteRuleMatch" + }, + "type": "array" + }, + "priority": { + "description": "For routeRules within a given pathMatcher, priority determines the order in which load balancer will interpret routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied.\nYou cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number between 0 and 2147483647 inclusive.\nPriority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules.", + "format": "int32", + "type": "integer" + }, + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + }, + "service": { + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. 
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", + "type": "string" + }, + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + } + }, + "type": "object" + }, + "HttpRouteRuleMatch": { + "description": "HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. All specified criteria must be satisfied for a match to occur.", + "id": "HttpRouteRuleMatch", + "properties": { + "fullPathMatch": { + "description": "For satifying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL.\nFullPathMatch must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" + }, + "headerMatches": { + "description": "Specifies a list of header match criteria, all of which must match corresponding headers in the request.", + "items": { + "$ref": "HttpHeaderMatch" + }, + "type": "array" + }, + "ignoreCase": { + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\ncaseSensitive must not be used with regexMatch.", + "type": "boolean" + }, + "metadataFilters": { + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can be overrides those specified in ForwardingRule that refers to this UrlMap.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "items": { + "$ref": "MetadataFilter" + }, + "type": "array" + }, + "prefixMatch": { + "description": "For satifying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /.\nThe value must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" + }, + "queryParameterMatches": { + "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.", + "items": { + "$ref": "HttpQueryParameterMatch" + }, + "type": "array" + }, + "regexMatch": { + "description": "For satifying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. 
For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" + } + }, + "type": "object" + }, "HttpsHealthCheck": { - "description": "An HttpsHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTPS.", + "description": "Represents a legacy HTTPS Health Check resource.\n\nLegacy health checks are required by network load balancers. For more information, read Health Check Concepts.", "id": "HttpsHealthCheck", "properties": { "checkIntervalSec": { @@ -24010,7 +28501,7 @@ "type": "object" }, "Image": { - "description": "An Image resource. (== resource_for beta.images ==) (== resource_for v1.images ==)", + "description": "Represents an Image resource.\n\nYou can use images to create boot disks for your VM instances. For more information, read Images. (== resource_for beta.images ==) (== resource_for v1.images ==)", "id": "Image", "properties": { "archiveSizeBytes": { @@ -24111,7 +28602,7 @@ "type": "string" }, "sha1Checksum": { - "description": "An optional SHA1 checksum of the disk image before unpackaging provided by the client when the disk image is created.", + "description": "[Deprecated] This field is deprecated. An optional SHA1 checksum of the disk image before unpackaging provided by the client when the disk image is created.", "pattern": "[a-f0-9]{40}", "type": "string" }, @@ -24310,7 +28801,7 @@ "type": "object" }, "Instance": { - "description": "An Instance resource. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", + "description": "Represents an Instance resource.\n\nAn instance is a virtual machine that is hosted on Google Cloud Platform. For more information, read Virtual Machine Instances. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", "id": "Instance", "properties": { "canIpForward": { @@ -24340,6 +28831,10 @@ }, "type": "array" }, + "displayDevice": { + "$ref": "DisplayDevice", + "description": "Enables display device for the instance." + }, "guestAccelerators": { "description": "A list of the type and count of accelerator cards attached to the instance.", "items": { @@ -24348,6 +28843,7 @@ "type": "array" }, "hostname": { + "description": "Specifies the hostname of the instance. The specified hostname must be RFC1035 compliant. If hostname is not specified, the default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal when using the global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS.", "type": "string" }, "id": { @@ -24406,6 +28902,10 @@ }, "type": "array" }, + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "Specifies the reservations that this instance can consume from." + }, "scheduling": { "$ref": "Scheduling", "description": "Sets the scheduling options for this instance." @@ -24484,7 +28984,7 @@ "$ref": "InstancesScopedList", "description": "[Output Only] Name of the scope containing this set of instances." 
}, - "description": "A list of InstancesScopedList resources.", + "description": "An object that contains a list of instances scoped by zone.", "type": "object" }, "kind": { @@ -24585,7 +29085,7 @@ "type": "object" }, "InstanceGroup": { - "description": "InstanceGroups (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", + "description": "Represents an unmanaged Instance Group resource.\n\nUse unmanaged instance groups if you need to apply load balancing to groups of heterogeneous instances or if you need to manage the instances yourself. For more information, read Instance groups.\n\nFor zonal unmanaged Instance Group, use instanceGroups resource.\n\nFor regional unmanaged Instance Group, use regionInstanceGroups resource. (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", "id": "InstanceGroup", "properties": { "creationTimestamp": { @@ -24881,7 +29381,7 @@ "type": "object" }, "InstanceGroupManager": { - "description": "An Instance Group Manager resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", + "description": "Represents a Managed Instance Group resource.\n\nAn instance group is a collection of VM instances that you can manage as a single entity. For more information, read Instance groups.\n\nFor zonal Managed Instance Group, use the instanceGroupManagers resource.\n\nFor regional Managed Instance Group, use the regionInstanceGroupManagers resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", "id": "InstanceGroupManager", "properties": { "autoHealingPolicies": { @@ -25331,6 +29831,7 @@ "type": "string" }, "type": { + "description": "The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).", "enum": [ "OPPORTUNISTIC", "PROACTIVE" @@ -26098,6 +30599,10 @@ }, "type": "array" }, + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "Specifies the reservations that this instance can consume from." + }, "scheduling": { "$ref": "Scheduling", "description": "Specifies the scheduling options for the instances that are created from this template." @@ -26130,7 +30635,7 @@ "type": "object" }, "InstanceTemplate": { - "description": "An Instance Template resource. (== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", + "description": "Represents an Instance Template resource.\n\nYou can use instance templates to create VM instances and managed instance groups. For more information, read Instance Templates. 
(== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", "id": "InstanceTemplate", "properties": { "creationTimestamp": { @@ -26509,8 +31014,25 @@ }, "type": "object" }, + "Int64RangeMatch": { + "description": "HttpRouteRuleMatch criteria for field values that must stay within the specified integer range.", + "id": "Int64RangeMatch", + "properties": { + "rangeEnd": { + "description": "The end of the range (exclusive) in signed long integer format.", + "format": "int64", + "type": "string" + }, + "rangeStart": { + "description": "The start of the range (inclusive) in signed long integer format.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "Interconnect": { - "description": "Represents an Interconnects resource. The Interconnects resource is a dedicated connection between Google's network and your on-premises network. For more information, see the Dedicated overview page. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", + "description": "Represents an Interconnect resource.\n\nAn Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", "id": "Interconnect", "properties": { "adminEnabled": { @@ -26564,7 +31086,7 @@ "type": "array" }, "interconnectType": { - "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", + "description": "Type of interconnect, which can take one of the following values: \n- PARTNER: A partner-managed interconnection shared between customers though a partner. \n- DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.", "enum": [ "DEDICATED", "IT_PRIVATE", @@ -26583,11 +31105,13 @@ "type": "string" }, "linkType": { - "description": "Type of link requested. This field indicates speed of each of the links in the bundle, not the entire bundle.", + "description": "Type of link requested, which can take one of the following values: \n- LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics \n- LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", "enum": [ + "LINK_TYPE_ETHERNET_100G_LR", "LINK_TYPE_ETHERNET_10G_LR" ], "enumDescriptions": [ + "", "" ], "type": "string" @@ -26611,7 +31135,7 @@ "type": "string" }, "operationalStatus": { - "description": "[Output Only] The current status of whether or not this Interconnect is functional.", + "description": "[Output Only] The current status of this Interconnect's functionality, which can take one of the following values: \n- OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. \n- OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. \n- OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. 
No attachments may be provisioned or updated on this Interconnect.", "enum": [ "OS_ACTIVE", "OS_UNPROVISIONED" @@ -26641,7 +31165,7 @@ "type": "string" }, "state": { - "description": "[Output Only] The current state of whether or not this Interconnect is functional.", + "description": "[Output Only] The current state of Interconnect functionality, which can take one of the following values: \n- ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. \n- UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. \n- UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.", "enum": [ "ACTIVE", "UNPROVISIONED" @@ -26656,7 +31180,7 @@ "type": "object" }, "InterconnectAttachment": { - "description": "Represents an InterconnectAttachment (VLAN attachment) resource. For more information, see Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", + "description": "Represents an Interconnect Attachment (VLAN) resource.\n\nYou can use Interconnect attachments (VLANS) to connect your Virtual Private Cloud networks to your on-premises networks through an Interconnect. For more information, read Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", "id": "InterconnectAttachment", "properties": { "adminEnabled": { @@ -26664,16 +31188,18 @@ "type": "boolean" }, "bandwidth": { - "description": "Provisioned bandwidth capacity for the interconnectAttachment. Can be set by the partner to update the customer's provisioned bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED.", + "description": "Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one of the following values: \n- BPS_50M: 50 Mbit/s \n- BPS_100M: 100 Mbit/s \n- BPS_200M: 200 Mbit/s \n- BPS_300M: 300 Mbit/s \n- BPS_400M: 400 Mbit/s \n- BPS_500M: 500 Mbit/s \n- BPS_1G: 1 Gbit/s \n- BPS_2G: 2 Gbit/s \n- BPS_5G: 5 Gbit/s \n- BPS_10G: 10 Gbit/s \n- BPS_20G: 20 Gbit/s \n- BPS_50G: 50 Gbit/s", "enum": [ "BPS_100M", "BPS_10G", "BPS_1G", "BPS_200M", + "BPS_20G", "BPS_2G", "BPS_300M", "BPS_400M", "BPS_500M", + "BPS_50G", "BPS_50M", "BPS_5G" ], @@ -26687,6 +31213,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -26715,7 +31243,7 @@ "type": "string" }, "edgeAvailabilityDomain": { - "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time. For improved reliability, customers should configure a pair of attachments with one per availability domain. The selected availability domain will be provided to the Partner via the pairing key so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "description": "Desired availability domain for the attachment. 
Only available for type PARTNER, at creation time, and can take one of the following values: \n- AVAILABILITY_DOMAIN_ANY \n- AVAILABILITY_DOMAIN_1 \n- AVAILABILITY_DOMAIN_2 For improved reliability, customers should configure a pair of attachments, one per availability domain. The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", "enum": [ "AVAILABILITY_DOMAIN_1", "AVAILABILITY_DOMAIN_2", @@ -26729,7 +31257,7 @@ "type": "string" }, "googleReferenceId": { - "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", + "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues. [Deprecated] This field is not used.", "type": "string" }, "id": { @@ -26752,7 +31280,7 @@ "type": "string" }, "operationalStatus": { - "description": "[Output Only] The current status of whether or not this interconnect attachment is functional.", + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional, which can take one of the following values: \n- OS_ACTIVE: The attachment has been turned up and is ready to use. \n- OS_UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete.", "enum": [ "OS_ACTIVE", "OS_UNPROVISIONED" @@ -26768,7 +31296,7 @@ "type": "string" }, "partnerAsn": { - "description": "Optional BGP ASN for the router that should be supplied by a layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", + "description": "Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", "format": "int64", "type": "string" }, @@ -26793,7 +31321,7 @@ "type": "string" }, "state": { - "description": "[Output Only] The current state of this attachment's functionality.", + "description": "[Output Only] The current state of this attachment's functionality. Enum values ACTIVE and UNPROVISIONED are shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect attachments, while enum values PENDING_PARTNER, PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only PARTNER and PARTNER_PROVIDER interconnect attachments. This state can take one of the following values: \n- ACTIVE: The attachment has been turned up and is ready to use. \n- UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. \n- PENDING_PARTNER: A newly-created PARTNER attachment that has not yet been configured on the Partner side. \n- PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it. \n- PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is waiting for a customer to activate it. \n- DEFUNCT: The attachment was deleted externally and is no longer functional. 
This could be because the associated Interconnect was removed, or because the other side of a Partner attachment was deleted.", "enum": [ "ACTIVE", "DEFUNCT", @@ -26815,6 +31343,7 @@ "type": "string" }, "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: \n- DEDICATED: an attachment to a Dedicated Interconnect. \n- PARTNER: an attachment to a Partner Interconnect, created by the customer. \n- PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner.", "enum": [ "DEDICATED", "PARTNER", @@ -27072,7 +31601,7 @@ "type": "string" }, "portalUrl": { - "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep-link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", + "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", "type": "string" } }, @@ -27255,6 +31784,7 @@ "type": "string" }, "state": { + "description": "The state of a LACP link, which can take one of the following values: \n- ACTIVE: The link is configured and active within the bundle. \n- DETACHED: The link is not configured within the bundle. This means that the rest of the object should be empty.", "enum": [ "ACTIVE", "DETACHED" @@ -27442,7 +31972,7 @@ "type": "object" }, "InterconnectLocation": { - "description": "Represents an InterconnectLocations resource. The InterconnectLocations resource describes the locations where you can connect to Google's networks. For more information, see Colocation Facilities.", + "description": "Represents an Interconnect Attachment (VLAN) Location resource.\n\nYou can use this resource to find location details about an Interconnect attachment (VLAN). For more information about interconnect attachments, read Creating VLAN Attachments.", "id": "InterconnectLocation", "properties": { "address": { @@ -27458,7 +31988,7 @@ "type": "string" }, "continent": { - "description": "[Output Only] Continent for this location.", + "description": "[Output Only] Continent for this location, which can take one of the following values: \n- AFRICA \n- ASIA_PAC \n- EUROPE \n- NORTH_AMERICA \n- SOUTH_AMERICA", "enum": [ "AFRICA", "ASIA_PAC", @@ -27531,7 +32061,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of this InterconnectLocation. If the status is AVAILABLE, new Interconnects may be provisioned in this InterconnectLocation. Otherwise, no new Interconnects may be provisioned.", + "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: \n- CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. \n- AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects.", "enum": [ "AVAILABLE", "CLOSED" @@ -27710,7 +32240,7 @@ "type": "string" }, "issueType": { - "description": "Form this outage is expected to take. Note that the \"IT_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "description": "Form this outage is expected to take, which can take one of the following values: \n- OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. 
\n- PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", "enum": [ "IT_OUTAGE", "IT_PARTIAL_OUTAGE", @@ -27730,7 +32260,7 @@ "type": "string" }, "source": { - "description": "The party that generated this notification. Note that \"NSRC_GOOGLE\" has been deprecated in favor of \"GOOGLE\"", + "description": "The party that generated this notification, which can take the following value: \n- GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", "enum": [ "GOOGLE", "NSRC_GOOGLE" @@ -27747,10 +32277,11 @@ "type": "string" }, "state": { - "description": "State of this notification. Note that the \"NS_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "description": "State of this notification, which can take one of the following values: \n- ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. \n- CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", "enum": [ "ACTIVE", "CANCELLED", + "COMPLETED", "NS_ACTIVE", "NS_CANCELED" ], @@ -27758,6 +32289,7 @@ "", "", "", + "", "" ], "type": "string" @@ -28078,9 +32610,16 @@ "type": "object" }, "LogConfigCounterOptions": { - "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nSupported field names: - \"authority\", which is \"[token]\" if IAMContext.token is present, otherwise the value of IAMContext.authority_selector if present, and otherwise a representation of IAMContext.principal; or - \"iam_principal\", a representation of IAMContext.principal even if a token or authority selector is present; or - \"\" (empty string), resulting in a counter with no fields.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/backend_debug_access_count {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support multiple field names (though this may be supported in the future).", + "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. 
The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nSupported field names: - \"authority\", which is \"[token]\" if IAMContext.token is present, otherwise the value of IAMContext.authority_selector if present, and otherwise a representation of IAMContext.principal; or - \"iam_principal\", a representation of IAMContext.principal even if a token or authority selector is present; or - \"\" (empty string), resulting in a counter with no fields.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/debug_access_count {iam_principal=[value of IAMContext.principal]}\n\nTODO(b/141846426): Consider supporting \"authority\" and \"iam_principal\" fields in the same counter.", "id": "LogConfigCounterOptions", "properties": { + "customFields": { + "description": "Custom fields.", + "items": { + "$ref": "LogConfigCounterOptionsCustomField" + }, + "type": "array" + }, "field": { "description": "The field value to attribute.", "type": "string" @@ -28092,12 +32631,27 @@ }, "type": "object" }, + "LogConfigCounterOptionsCustomField": { + "description": "Custom fields. These can be used to create a counter with arbitrary field/value pairs. See: go/rpcsp-custom-fields.", + "id": "LogConfigCounterOptionsCustomField", + "properties": { + "name": { + "description": "Name is the field name.", + "type": "string" + }, + "value": { + "description": "Value is the field value. It is important that in contrast to the CounterOptions.field, the value here is a constant that is not derived from the IAMContext.", + "type": "string" + } + }, + "type": "object" + }, "LogConfigDataAccessOptions": { "description": "Write a Data Access (Gin) log", "id": "LogConfigDataAccessOptions", "properties": { "logMode": { - "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.\n\nNOTE: Logging to Gin in a fail-closed manner is currently unsupported while work is being done to satisfy the requirements of go/345. Currently, setting LOG_FAIL_CLOSED mode will have no effect, but still exists because there is active work being done to support it (b/115874152).", + "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.", "enum": [ "LOG_FAIL_CLOSED", "LOG_MODE_UNSPECIFIED" @@ -28112,7 +32666,7 @@ "type": "object" }, "MachineType": { - "description": "A Machine Type resource. (== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", + "description": "Represents a Machine Type resource.\n\nYou can use specific machine types for your VM instances based on performance and pricing requirements. For more information, read Machine Types. (== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", "id": "MachineType", "properties": { "creationTimestamp": { @@ -28684,6 +33238,49 @@ }, "type": "object" }, + "MetadataFilter": { + "description": "Opaque filter criteria used by loadbalancers to restrict routing configuration to a limited set of loadbalancing proxies. Proxies and sidecars involved in loadbalancing would typically present metadata to the loadbalancers which need to match criteria specified here. 
If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nAn example for using metadataFilters would be: if loadbalancing involves Envoys, they will only receive routing configuration when values in metadataFilters match values supplied in \u003ca href=\"https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/base.proto#envoy-api-msg-core-node\" Node metadata of their XDS requests to loadbalancers.", + "id": "MetadataFilter", + "properties": { + "filterLabels": { + "description": "The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria \nThis list must not be empty and can have at the most 64 entries.", + "items": { + "$ref": "MetadataFilterLabelMatch" + }, + "type": "array" + }, + "filterMatchCriteria": { + "description": "Specifies how individual filterLabel matches within the list of filterLabels contribute towards the overall metadataFilter match.\nSupported values are: \n- MATCH_ANY: At least one of the filterLabels must have a matching label in the provided metadata. \n- MATCH_ALL: All filterLabels must have matching labels in the provided metadata.", + "enum": [ + "MATCH_ALL", + "MATCH_ANY", + "NOT_SET" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "MetadataFilterLabelMatch": { + "description": "MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the loadbalancer.", + "id": "MetadataFilterLabelMatch", + "properties": { + "name": { + "description": "Name of metadata label.\nThe name can have a maximum length of 1024 characters and must be at least 1 character long.", + "type": "string" + }, + "value": { + "description": "The value of the label must match the specified value.\nvalue can have a maximum length of 1024 characters.", + "type": "string" + } + }, + "type": "object" + }, "NamedPort": { "description": "The named port. For example: .", "id": "NamedPort", @@ -28701,7 +33298,7 @@ "type": "object" }, "Network": { - "description": "Represents a Network resource. Read Virtual Private Cloud (VPC) Network Overview for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", + "description": "Represents a VPC Network resource.\n\nNetworks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", "id": "Network", "properties": { "IPv4Range": { @@ -28718,11 +33315,11 @@ "type": "string" }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "An optional description of this resource. Provide this field when you create the resource.", "type": "string" }, "gatewayIPv4": { - "description": "[Output Only] The gateway address for default routing out of the network. 
This value is read only and is selected by GCP.", + "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", "type": "string" }, @@ -28742,7 +33339,7 @@ "compute.networks.insert" ] }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -28780,7 +33377,7 @@ "type": "string" }, "ipAddress": { - "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in GCE (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used.", + "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used.", "type": "string" }, "port": { @@ -28792,7 +33389,7 @@ "type": "object" }, "NetworkEndpointGroup": { - "description": "Represents a collection of network endpoints.", + "description": "Represents a collection of network endpoints.\n\nFor more information read Setting up network endpoint groups in load balancing. (== resource_for v1.networkEndpointGroups ==) (== resource_for beta.networkEndpointGroups ==)", "id": "NetworkEndpointGroup", "properties": { "creationTimestamp": { @@ -29353,7 +33950,7 @@ "type": "array" }, "aliasIpRanges": { - "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", + "description": "An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks.", "items": { "$ref": "AliasIpRange" }, @@ -29370,19 +33967,19 @@ "type": "string" }, "name": { - "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc.", + "description": "[Output Only] The name of the network interface, which is generated by the server. For network devices, these are eth0, eth1, etc.", "type": "string" }, "network": { - "description": "URL of the network resource for this instance. 
When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default", + "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default", "type": "string" }, "networkIP": { - "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system.", + "description": "An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system.", "type": "string" }, "subnetwork": { - "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork", + "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork", "type": "string" } }, @@ -29505,15 +34102,23 @@ "id": "NetworkPeering", "properties": { "autoCreateRoutes": { - "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes instead. Indicates whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", + "description": "This field will be deprecated soon. Use the exchange_subnet_routes field instead. 
Indicates whether full mesh connectivity is created and managed automatically between peered networks. Currently this field should always be true since Google Compute Engine will automatically create and manage subnetwork routes between two networks when peering state is ACTIVE.", "type": "boolean" }, "exchangeSubnetRoutes": { - "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the peering state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", + "description": "Indicates whether full mesh connectivity is created and managed automatically between peered networks. Currently this field should always be true since Google Compute Engine will automatically create and manage subnetwork routes between two networks when peering state is ACTIVE.", + "type": "boolean" + }, + "exportCustomRoutes": { + "description": "Whether to export the custom routes to peer network.", + "type": "boolean" + }, + "importCustomRoutes": { + "description": "Whether to import the custom routes from peer network.", "type": "boolean" }, "name": { - "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "type": "string" }, "network": { @@ -29521,7 +34126,7 @@ "type": "string" }, "state": { - "description": "[Output Only] State for the peering.", + "description": "[Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The peering is `ACTIVE` when there's a matching configuration in the peer network.", "enum": [ "ACTIVE", "INACTIVE" @@ -29544,7 +34149,7 @@ "id": "NetworkRoutingConfig", "properties": { "routingMode": { - "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnets of this network, across regions.", + "description": "The network-wide routing mode to use. If set to REGIONAL, this network's Cloud Routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's Cloud Routers will advertise routes with all subnets of this network, across regions.", "enum": [ "GLOBAL", "REGIONAL" @@ -29562,7 +34167,7 @@ "id": "NetworksAddPeeringRequest", "properties": { "autoCreateRoutes": { - "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes in network_peering instead. Whether Google Compute Engine manages the routes automatically.", + "description": "This field will be deprecated soon. 
Use exchange_subnet_routes in network_peering instead. Indicates whether full mesh connectivity is created and managed automatically between peered networks. Currently this field should always be true since Google Compute Engine will automatically create and manage subnetwork routes between two networks when peering state is ACTIVE.", "type": "boolean" }, "name": { @@ -29576,7 +34181,7 @@ }, "networkPeering": { "$ref": "NetworkPeering", - "description": "Network peering parameters. In order to specify route policies for peering using import/export custom routes, you will have to fill all peering related parameters (name, peer network, exchange_subnet_routes) in network_peeringfield. Corresponding fields in NetworksAddPeeringRequest will be deprecated soon." + "description": "Network peering parameters. In order to specify route policies for peering using import and export custom routes, you must specify all peering related parameters (name, peer network, exchange_subnet_routes) in the network_peering field. The corresponding fields in NetworksAddPeeringRequest will be deprecated soon." }, "peerNetwork": { "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", @@ -29595,8 +34200,17 @@ }, "type": "object" }, + "NetworksUpdatePeeringRequest": { + "id": "NetworksUpdatePeeringRequest", + "properties": { + "networkPeering": { + "$ref": "NetworkPeering" + } + }, + "type": "object" + }, "NodeGroup": { - "description": "A NodeGroup resource. To create a node group, you must first create a node templates. To learn more about node groups and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeGroups ==) (== resource_for v1.nodeGroups ==)", + "description": "Represent a sole-tenant Node Group resource.\n\nA sole-tenant node is a physical server that is dedicated to hosting VM instances only for your specific project. Use sole-tenant nodes to keep your instances physically separated from instances in other projects, or to group your instances together on the same host hardware. For more information, read Sole-tenant nodes. (== resource_for beta.nodeGroups ==) (== resource_for v1.nodeGroups ==) NextID: 16", "id": "NodeGroup", "properties": { "creationTimestamp": { @@ -29898,6 +34512,10 @@ "description": "The type of this node.", "type": "string" }, + "serverBinding": { + "$ref": "ServerBinding", + "description": "Binding properties for the physical server." + }, "status": { "enum": [ "CREATING", @@ -29933,6 +34551,7 @@ "id": "NodeGroupsDeleteNodesRequest", "properties": { "nodes": { + "description": "Names of the nodes to delete.", "items": { "type": "string" }, @@ -30157,7 +34776,7 @@ "type": "object" }, "NodeTemplate": { - "description": "A Node Template resource. To learn more about node templates and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeTemplates ==) (== resource_for v1.nodeTemplates ==)", + "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. 
(== resource_for beta.nodeTemplates ==) (== resource_for v1.nodeTemplates ==) (== NextID: 16 ==)", "id": "NodeTemplate", "properties": { "creationTimestamp": { @@ -30205,6 +34824,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "serverBinding": { + "$ref": "ServerBinding", + "description": "Sets the binding properties for the physical server. Valid values include: \n- [Default] RESTART_NODE_ON_ANY_SERVER: Restarts VMs on any available physical server \n- RESTART_NODE_ON_MINIMAL_SERVER: Restarts VMs on the same physical server whenever possible \n\nSee Sole-tenant node options for more information." + }, "status": { "description": "[Output Only] The status of the node template. One of the following values: CREATING, READY, and DELETING.", "enum": [ @@ -30562,7 +35185,7 @@ "type": "object" }, "NodeType": { - "description": "A Node Type resource.", + "description": "Represent a sole-tenant Node Type resource.\n\nEach node within a node group must have a node type. A node type specifies the total amount of cores and memory for that node. Currently, the only available node type is n1-node-96-624 node type that has 96 vCPUs and 624 GB of memory, available in multiple zones. For more information read Node types. (== resource_for beta.nodeTypes ==) (== resource_for v1.nodeTypes ==)", "id": "NodeType", "properties": { "cpuPlatform": { @@ -30941,7 +35564,7 @@ "type": "object" }, "Operation": { - "description": "An Operation resource, used to manage asynchronous API requests. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", + "description": "Represents an Operation resource.\n\nYou can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses.\n\nOperations can be global, regional or zonal. \n- For global operations, use the globalOperations resource. \n- For regional operations, use the regionOperations resource. \n- For zonal operations, use the zonalOperations resource. \n\nFor more information, read Global, Regional, and Zonal Resources. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", "id": "Operation", "properties": { "clientOperationId": { @@ -30997,7 +35620,7 @@ "type": "integer" }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] The unique identifier for the operation. This identifier is defined by the server.", "format": "uint64", "type": "string" }, @@ -31011,7 +35634,7 @@ "type": "string" }, "name": { - "description": "[Output Only] Name of the resource.", + "description": "[Output Only] Name of the operation.", "type": "string" }, "operationType": { @@ -31024,7 +35647,7 @@ "type": "integer" }, "region": { - "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "description": "[Output Only] The URL of the region where the operation resides. 
Only applicable when performing regional operations.", "type": "string" }, "selfLink": { @@ -31151,7 +35774,7 @@ "type": "array" }, "zone": { - "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "description": "[Output Only] The URL of the zone where the operation resides. Only applicable when performing per-zone operations.", "type": "string" } }, @@ -31475,28 +36098,107 @@ }, "type": "object" }, + "OutlierDetection": { + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service.", + "id": "OutlierDetection", + "properties": { + "baseEjectionTime": { + "$ref": "Duration", + "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." + }, + "consecutiveErrors": { + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "consecutiveGatewayFailure": { + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveErrors": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveGatewayFailure": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "enforcingSuccessRate": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "interval": { + "$ref": "Duration", + "description": "Time interval between ejection sweep analysis. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 seconds." + }, + "maxEjectionPercent": { + "description": "Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%.", + "format": "int32", + "type": "integer" + }, + "successRateMinimumHosts": { + "description": "The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. 
Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "successRateRequestVolume": { + "description": "The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "successRateStdevFactor": { + "description": "This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "PathMatcher": { "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", "id": "PathMatcher", "properties": { + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + }, "defaultService": { "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", "type": "string" }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When when none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + }, "description": { "description": "An optional description of this resource. 
Provide this property when you create the resource.", "type": "string" }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap" + }, "name": { "description": "The name to which this PathMatcher is referred by the HostRule.", "type": "string" }, "pathRules": { - "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nOnly one of pathRules or routeRules must be set.", + "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nWithin a given pathMatcher, only one of pathRules or routeRules must be set.", "items": { "$ref": "PathRule" }, "type": "array" + }, + "routeRules": { + "description": "The list of ordered HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. The order of specifying routeRules matters: the first rule that matches will cause its specified routing action to take effect.\nWithin a given pathMatcher, only one of pathRules or routeRules must be set.\nrouteRules are not supported in UrlMaps intended for External Load balancers.", + "items": { + "$ref": "HttpRouteRule" + }, + "type": "array" } }, "type": "object" @@ -31512,15 +36214,23 @@ }, "type": "array" }, + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", "type": "string" + }, + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." } }, "type": "object" }, "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. 
It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` consists of a list of `bindings`. A `binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM.\n\n**JSON Example**\n\n{ \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] }\n\n**YAML Example**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-other-app@appspot.gserviceaccount.com role: roles/owner - members: - user:sean@example.com role: roles/viewer\n\n\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam/docs).", + "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions (defined by IAM or configured by users). A `binding` can optionally specify a `condition`, which is a logic expression that further constrains the role binding based on attributes about the request and/or target resource.\n\n**JSON Example**\n\n{ \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [\"user:eve@example.com\"], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ] }\n\n**YAML Example**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam/docs).", "id": "Policy", "properties": { "auditConfigs": { @@ -31531,14 +36241,14 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`. `bindings` with no members will result in an error.", + "description": "Associates a list of `members` to a `role`. Optionally may specify a `condition` that determines when binding is in effect. `bindings` with no members will result in an error.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. 
It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten. Due to blind-set semantics of an etag-less policy, 'setIamPolicy' will not fail even if either of incoming or stored policy does not meet the version requirements.", "format": "byte", "type": "string" }, @@ -31554,7 +36264,7 @@ "type": "array" }, "version": { - "description": "Deprecated.", + "description": "Specifies the format of the policy.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.\n\nOperations affecting conditional bindings must specify version 3. This can be either setting a conditional policy, modifying a conditional binding, or removing a conditional binding from the stored conditional policy. Operations on non-conditional policies may specify any valid value or leave the field unset.\n\nIf no etag is provided in the call to `setIamPolicy`, any version compliance checks on the incoming and/or stored policy is skipped.", "format": "int32", "type": "integer" } @@ -31562,7 +36272,7 @@ "type": "object" }, "Project": { - "description": "A Project resource. For an overview of projects, see Cloud Platform Resource Hierarchy. (== resource_for v1.projects ==) (== resource_for beta.projects ==)", + "description": "Represents a Project resource.\n\nA project is used to organize resources in a Google Cloud Platform environment. For more information, read about the Resource Hierarchy. 
(== resource_for v1.projects ==) (== resource_for beta.projects ==)", "id": "Project", "properties": { "commonInstanceMetadata": { @@ -31729,7 +36439,17 @@ "AUTOSCALERS", "BACKEND_BUCKETS", "BACKEND_SERVICES", + "C2_CPUS", "COMMITMENTS", + "COMMITTED_C2_CPUS", + "COMMITTED_CPUS", + "COMMITTED_LOCAL_SSD_TOTAL_GB", + "COMMITTED_N2_CPUS", + "COMMITTED_NVIDIA_K80_GPUS", + "COMMITTED_NVIDIA_P100_GPUS", + "COMMITTED_NVIDIA_P4_GPUS", + "COMMITTED_NVIDIA_T4_GPUS", + "COMMITTED_NVIDIA_V100_GPUS", "CPUS", "CPUS_ALL_REGIONS", "DISKS_TOTAL_GB", @@ -31747,11 +36467,13 @@ "INTERCONNECTS", "INTERCONNECT_ATTACHMENTS_PER_REGION", "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS", + "INTERCONNECT_TOTAL_GBPS", "INTERNAL_ADDRESSES", "IN_USE_ADDRESSES", "IN_USE_BACKUP_SCHEDULES", "IN_USE_SNAPSHOT_SCHEDULES", "LOCAL_SSD_TOTAL_GB", + "N2_CPUS", "NETWORKS", "NETWORK_ENDPOINT_GROUPS", "NVIDIA_K80_GPUS", @@ -31774,6 +36496,7 @@ "PREEMPTIBLE_NVIDIA_V100_GPUS", "REGIONAL_AUTOSCALERS", "REGIONAL_INSTANCE_GROUP_MANAGERS", + "RESERVATIONS", "RESOURCE_POLICIES", "ROUTERS", "ROUTES", @@ -31863,6 +36586,19 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" @@ -31904,7 +36640,7 @@ "type": "object" }, "Region": { - "description": "Region resource. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", + "description": "Represents a Region resource.\n\nA region is a geographical area where a resource is located. For more information, read Regions and Zones. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", "id": "Region", "properties": { "creationTimestamp": { @@ -32189,6 +36925,32 @@ }, "type": "object" }, + "RegionDisksAddResourcePoliciesRequest": { + "id": "RegionDisksAddResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be added to this disk.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionDisksRemoveResourcePoliciesRequest": { + "id": "RegionDisksRemoveResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be removed from this disk.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "RegionDisksResizeRequest": { "id": "RegionDisksResizeRequest", "properties": { @@ -32808,10 +37570,484 @@ }, "type": "object" }, + "RegionTargetHttpsProxiesSetSslCertificatesRequest": { + "id": "RegionTargetHttpsProxiesSetSslCertificatesRequest", + "properties": { + "sslCertificates": { + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionUrlMapsValidateRequest": { + "id": "RegionUrlMapsValidateRequest", + "properties": { + "resource": { + "$ref": "UrlMap", + "description": "Content of the UrlMap to be validated." + } + }, + "type": "object" + }, + "RequestMirrorPolicy": { + "description": "A policy that specifies how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. 
Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow.", + "id": "RequestMirrorPolicy", + "properties": { + "backendService": { + "description": "The full or partial URL to the BackendService resource being mirrored to.", + "type": "string" + } + }, + "type": "object" + }, + "Reservation": { + "description": "Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. (== resource_for beta.reservations ==) (== resource_for v1.reservations ==)", + "id": "Reservation", + "properties": { + "commitment": { + "description": "[Output Only] Full or partial URL to a parent commitment. This field displays for reservations that are tied to a commitment.", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#reservation", + "description": "[Output Only] Type of the resource. Always compute#reservations for reservations.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "type": "string" + }, + "specificReservation": { + "$ref": "AllocationSpecificSKUReservation", + "description": "Reservation for instances with specific machine shapes." + }, + "specificReservationRequired": { + "description": "Indicates whether the reservation can be consumed by VMs with affinity for \"any\" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation.", + "type": "boolean" + }, + "status": { + "description": "[Output Only] The status of the reservation.", + "enum": [ + "CREATING", + "DELETING", + "INVALID", + "READY", + "UPDATING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "zone": { + "description": "Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment.", + "type": "string" + } + }, + "type": "object" + }, + "ReservationAffinity": { + "description": "Specifies the reservations that this instance can consume from.", + "id": "ReservationAffinity", + "properties": { + "consumeReservationType": { + "description": "Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. 
See Consuming reserved instances for examples.", + "enum": [ + "ANY_RESERVATION", + "NO_RESERVATION", + "SPECIFIC_RESERVATION", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "key": { + "description": "Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value.", + "type": "string" + }, + "values": { + "description": "Corresponds to the label values of a reservation resource.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ReservationAggregatedList": { + "description": "Contains a list of reservations.", + "id": "ReservationAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "ReservationsScopedList", + "description": "Name of the scope containing this set of reservations." + }, + "description": "A list of Allocation resources.", + "type": "object" + }, + "kind": { + "default": "compute#reservationAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ReservationList": { + "id": "ReservationList", + "properties": { + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "type": "string" + }, + "items": { + "description": "[Output Only] A list of Allocation resources.", + "items": { + "$ref": "Reservation" + }, + "type": "array" + }, + "kind": { + "default": "compute#reservationList", + "description": "[Output Only] Type of resource.Always compute#reservationsList for listsof reservations", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ReservationsResizeRequest": { + "id": "ReservationsResizeRequest", + "properties": { + "specificSkuCount": { + "description": "Number of allocated resources can be resized with minimum = 1 and maximum = 1000.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ReservationsScopedList": { + "id": "ReservationsScopedList", + "properties": { + "reservations": { + "description": "A list of reservations contained in this scope.", + "items": { + "$ref": "Reservation" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of reservations when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "ResourceCommitment": { "description": "Commitment for a particular resource (a Commitment is composed of one or more of these).", "id": "ResourceCommitment", "properties": { + "acceleratorType": { + "description": "Name of the accelerator type resource. Applicable only when the type is ACCELERATOR.", + "type": "string" + }, "amount": { "description": "The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU.", "format": "int64", @@ -32820,11 +38056,15 @@ "type": { "description": "Type of resource for which this commitment applies. Possible values are VCPU and MEMORY", "enum": [ + "ACCELERATOR", + "LOCAL_SSD", "MEMORY", "UNSPECIFIED", "VCPU" ], "enumDescriptions": [ + "", + "", "", "", "" @@ -32844,8 +38084,570 @@ }, "type": "object" }, + "ResourcePoliciesScopedList": { + "id": "ResourcePoliciesScopedList", + "properties": { + "resourcePolicies": { + "description": "A list of resourcePolicies contained in this scope.", + "items": { + "$ref": "ResourcePolicy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of resourcePolicies when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ResourcePolicy": { + "id": "ResourcePolicy", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#resourcePolicy", + "description": "[Output Only] Type of the resource. Always compute#resource_policies for resource policies.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "region": { + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "type": "string" + }, + "snapshotSchedulePolicy": { + "$ref": "ResourcePolicySnapshotSchedulePolicy", + "description": "Resource policy for persistent disks for creating snapshots." + }, + "status": { + "description": "[Output Only] The status of resource policy creation.", + "enum": [ + "CREATING", + "DELETING", + "INVALID", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicyAggregatedList": { + "description": "Contains a list of resourcePolicies.", + "id": "ResourcePolicyAggregatedList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "ResourcePoliciesScopedList", + "description": "Name of the scope containing this set of resourcePolicies." + }, + "description": "A list of ResourcePolicy resources.", + "type": "object" + }, + "kind": { + "default": "compute#resourcePolicyAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ResourcePolicyDailyCycle": { + "description": "Time window specified for daily operations.", + "id": "ResourcePolicyDailyCycle", + "properties": { + "daysInCycle": { + "description": "Defines a schedule that runs every nth day of the month.", + "format": "int32", + "type": "integer" + }, + "duration": { + "description": "[Output only] A predetermined duration for the window, automatically chosen to be the smallest possible in the given scenario.", + "type": "string" + }, + "startTime": { + "description": "Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. 
For example, both 13:00-5 and 08:00 are valid.", + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicyHourlyCycle": { + "description": "Time window specified for hourly operations.", + "id": "ResourcePolicyHourlyCycle", + "properties": { + "duration": { + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", + "type": "string" + }, + "hoursInCycle": { + "description": "Allows to define schedule that runs every nth hour.", + "format": "int32", + "type": "integer" + }, + "startTime": { + "description": "Time within the window to start the operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicyList": { + "id": "ResourcePolicyList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "type": "string" + }, + "items": { + "description": "[Output Only] A list of ResourcePolicy resources.", + "items": { + "$ref": "ResourcePolicy" + }, + "type": "array" + }, + "kind": { + "default": "compute#resourcePolicyList", + "description": "[Output Only] Type of resource.Always compute#resourcePoliciesList for listsof resourcePolicies", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicy": { + "description": "A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained.", + "id": "ResourcePolicySnapshotSchedulePolicy", + "properties": { + "retentionPolicy": { + "$ref": "ResourcePolicySnapshotSchedulePolicyRetentionPolicy", + "description": "Retention policy applied to snapshots created by this resource policy." + }, + "schedule": { + "$ref": "ResourcePolicySnapshotSchedulePolicySchedule", + "description": "A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy." + }, + "snapshotProperties": { + "$ref": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", + "description": "Properties with which snapshots are created such as labels, encryption keys." + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicyRetentionPolicy": { + "description": "Policy for retention of scheduled snapshots.", + "id": "ResourcePolicySnapshotSchedulePolicyRetentionPolicy", + "properties": { + "maxRetentionDays": { + "description": "Maximum age of the snapshot that is allowed to be kept.", + "format": "int32", + "type": "integer" + }, + "onSourceDiskDelete": { + "description": "Specifies the behavior to apply to scheduled snapshots when the source disk is deleted.", + "enum": [ + "APPLY_RETENTION_POLICY", + "KEEP_AUTO_SNAPSHOTS", + "UNSPECIFIED_ON_SOURCE_DISK_DELETE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicySchedule": { + "description": "A schedule for disks where the schedueled operations are performed.", + "id": "ResourcePolicySnapshotSchedulePolicySchedule", + "properties": { + "dailySchedule": { + "$ref": "ResourcePolicyDailyCycle" + }, + "hourlySchedule": { + "$ref": "ResourcePolicyHourlyCycle" + }, + "weeklySchedule": { + "$ref": "ResourcePolicyWeeklyCycle" + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicySnapshotProperties": { + "description": "Specified snapshot properties for scheduled snapshots created by this policy.", + "id": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", + "properties": { + "guestFlush": { + "description": "Indication to perform a ?guest aware? snapshot.", + "type": "boolean" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. 
Label values may be empty.", + "type": "object" + }, + "storageLocations": { + "description": "Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional).", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ResourcePolicyWeeklyCycle": { + "description": "Time window specified for weekly operations.", + "id": "ResourcePolicyWeeklyCycle", + "properties": { + "dayOfWeeks": { + "description": "Up to 7 intervals/windows, one for each day of the week.", + "items": { + "$ref": "ResourcePolicyWeeklyCycleDayOfWeek" + }, + "type": "array" + } + }, + "type": "object" + }, + "ResourcePolicyWeeklyCycleDayOfWeek": { + "id": "ResourcePolicyWeeklyCycleDayOfWeek", + "properties": { + "day": { + "description": "Allows to define schedule that runs specified day of the week.", + "enum": [ + "FRIDAY", + "INVALID", + "MONDAY", + "SATURDAY", + "SUNDAY", + "THURSDAY", + "TUESDAY", + "WEDNESDAY" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "duration": { + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", + "type": "string" + }, + "startTime": { + "description": "Time within the window to start the operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", + "type": "string" + } + }, + "type": "object" + }, "Route": { - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped. (== resource_for beta.routes ==) (== resource_for v1.routes ==)", + "description": "Represents a Route resource.\n\nA route defines a path from VM instances in the VPC network to a specific destination. This destination can be inside or outside the VPC network. For more information, read the Routes overview. (== resource_for beta.routes ==) (== resource_for v1.routes ==)", "id": "Route", "properties": { "creationTimestamp": { @@ -32853,7 +38655,7 @@ "type": "string" }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "An optional description of this resource. Provide this field when you create the resource.", "type": "string" }, "destRange": { @@ -32881,7 +38683,7 @@ "compute.routes.insert" ] }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -32895,7 +38697,11 @@ "type": "string" }, "nextHopGateway": { - "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway", + "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/project/global/gateways/default-internet-gateway", + "type": "string" + }, + "nextHopIlb": { + "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets. You can only specify the forwarding rule as a partial or full URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule \n- regions/region/forwardingRules/forwardingRule", "type": "string" }, "nextHopInstance": { @@ -32924,7 +38730,7 @@ "compute.routes.insert" ] }, - "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535.", + "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In cases where multiple routes have equal prefix length, the one with the lowest-numbered priority value wins. The default value is `1000`. The priority value must be from `0` to `65535`, inclusive.", "format": "uint32", "type": "integer" }, @@ -33144,7 +38950,7 @@ "type": "object" }, "Router": { - "description": "Router resource.", + "description": "Represents a Cloud Router resource.\n\nFor more information about Cloud Router, read the the Cloud Router overview.", "id": "Router", "properties": { "bgp": { @@ -33152,7 +38958,7 @@ "description": "BGP information specific to this router." }, "bgpPeers": { - "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", + "description": "BGP information that must be configured into the routing stack to establish BGP peering. This information must specify the peer ASN and either the interface name, IP address, or peer IP address. Please refer to RFC4273.", "items": { "$ref": "RouterBgpPeer" }, @@ -33172,7 +38978,7 @@ "type": "string" }, "interfaces": { - "description": "Router interfaces. 
Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", + "description": "Router interfaces. Each interface requires either one linked resource, (for example, linkedVpnTunnel), or IP address and IP address range (for example, ipRange), or both.", "items": { "$ref": "RouterInterface" }, @@ -33194,7 +39000,7 @@ "type": "string" }, "nats": { - "description": "A list of Nat services created in this router.", + "description": "A list of NAT services created in this router.", "items": { "$ref": "RouterNat" }, @@ -33352,7 +39158,7 @@ "id": "RouterBgp", "properties": { "advertiseMode": { - "description": "User-specified flag to indicate which mode to use for advertisement.", + "description": "User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM.", "enum": [ "CUSTOM", "DEFAULT" @@ -33407,7 +39213,7 @@ "type": "string" }, "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: \n- ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. \n- ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. \n- ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC network. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", "items": { "enum": [ "ALL_SUBNETS" @@ -33420,14 +39226,14 @@ "type": "array" }, "advertisedIpRanges": { - "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", "items": { "$ref": "RouterAdvertisedIpRange" }, "type": "array" }, "advertisedRoutePriority": { - "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", + "description": "The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win.", "format": "uint32", "type": "integer" }, @@ -33440,7 +39246,7 @@ "type": "string" }, "managementType": { - "description": "[Output Only] The resource that configures and manages this BGP peer. 
MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google will automatically create, update, and delete this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "description": "[Output Only] The resource that configures and manages this BGP peer. \n- MANAGED_BY_USER is the default value and can be managed by you or other users \n- MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", "enum": [ "MANAGED_BY_ATTACHMENT", "MANAGED_BY_USER" @@ -33452,17 +39258,27 @@ "type": "string" }, "name": { - "description": "Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035.", + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "peerAsn": { - "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value.", "format": "uint32", "type": "integer" }, "peerIpAddress": { - "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported.", + "description": "IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported.", "type": "string" } }, @@ -33472,19 +39288,19 @@ "id": "RouterInterface", "properties": { "ipRange": { - "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", + "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", "type": "string" }, "linkedInterconnectAttachment": { - "description": "URI of the linked interconnect attachment. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be either be a VPN tunnel or an Interconnect attachment.", "type": "string" }, "linkedVpnTunnel": { - "description": "URI of the linked VPN tunnel. It must be in the same region as the router. 
Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "description": "URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be either a VPN tunnel or an Interconnect attachment.", "type": "string" }, "managementType": { - "description": "[Output Only] The resource that configures and manages this interface. MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google will automatically create, update, and delete this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "description": "[Output Only] The resource that configures and manages this interface. \n- MANAGED_BY_USER is the default value and can be managed directly by users. \n- MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically, by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", "enum": [ "MANAGED_BY_ATTACHMENT", "MANAGED_BY_USER" @@ -33496,7 +39312,12 @@ "type": "string" }, "name": { - "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "Name of this interface entry. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" } @@ -33619,13 +39440,24 @@ "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", "id": "RouterNat", "properties": { + "drainNatIps": { + "description": "A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only.", + "items": { + "type": "string" + }, + "type": "array" + }, "icmpIdleTimeoutSec": { "description": "Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.", "format": "int32", "type": "integer" }, + "logConfig": { + "$ref": "RouterNatLogConfig", + "description": "Configure logging on this NAT." + }, "minPortsPerVm": { - "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This gets rounded up to the nearest power of 2. Eg. if the value of this field is 50, at least 64 ports will be allocated to a VM.", + "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. 
For example, if the value of this field is 50, at least 64 ports are allocated to a VM.", "format": "int32", "type": "integer" }, @@ -33635,7 +39467,7 @@ "type": "string" }, "natIpAllocateOption": { - "description": "Specify the NatIpAllocateOption. If it is AUTO_ONLY, then nat_ip should be empty.", + "description": "Specify the NatIpAllocateOption, which can take one of the following values: \n- MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. \n- AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty.", "enum": [ "AUTO_ONLY", "MANUAL_ONLY" @@ -33647,14 +39479,14 @@ "type": "string" }, "natIps": { - "description": "A list of URLs of the IP resources used for this Nat service. These IPs must be valid static external IP addresses assigned to the project. max_length is subject to change post alpha.", + "description": "A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project.", "items": { "type": "string" }, "type": "array" }, "sourceSubnetworkIpRangesToNat": { - "description": "Specify the Nat option. If this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", + "description": "Specify the Nat option, which can take one of the following values: \n- ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. \n- ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. \n- LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", "enum": [ "ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", @@ -33692,12 +39524,37 @@ }, "type": "object" }, + "RouterNatLogConfig": { + "description": "Configuration of logging on a NAT.", + "id": "RouterNatLogConfig", + "properties": { + "enable": { + "description": "Indicates whether or not to export logs. This is false by default.", + "type": "boolean" + }, + "filter": { + "description": "Specify the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT. This option can take one of the following values: \n- ERRORS_ONLY: Export logs only for connection failures. \n- TRANSLATIONS_ONLY: Export logs only for successful connections. 
\n- ALL: Export logs for all connections, successful and unsuccessful.", + "enum": [ + "ALL", + "ERRORS_ONLY", + "TRANSLATIONS_ONLY" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "RouterNatSubnetworkToNat": { "description": "Defines the IP ranges that want to use NAT for a subnetwork.", "id": "RouterNatSubnetworkToNat", "properties": { "name": { - "description": "URL for the subnetwork resource to use NAT.", + "description": "URL for the subnetwork resource that will use NAT.", "type": "string" }, "secondaryIpRangeNames": { @@ -33708,7 +39565,7 @@ "type": "array" }, "sourceIpRangesToNat": { - "description": "Specify the options for NAT ranges in the Subnetwork. All usages of single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", + "description": "Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", "items": { "enum": [ "ALL_IP_RANGES", @@ -33834,6 +39691,20 @@ }, "type": "array" }, + "drainAutoAllocatedNatIps": { + "description": "A list of IPs auto-allocated for NAT that are in drain mode. Example: [\"1.1.1.1\", \"179.12.26.133\"].", + "items": { + "type": "string" + }, + "type": "array" + }, + "drainUserAllocatedNatIps": { + "description": "A list of IPs user-allocated for NAT that are in drain mode. Example: [\"1.1.1.1\", \"179.12.26.133\"].", + "items": { + "type": "string" + }, + "type": "array" + }, "minExtraNatIpsNeeded": { "description": "The number of extra IPs to allocate. This will be greater than 0 only if user-specified IPs are NOT enough to allow all configured VMs to use NAT. This value is meaningful only when auto-allocation of NAT IPs is *not* used.", "format": "int32", @@ -34099,7 +39970,7 @@ "type": "object" }, "Scheduling": { - "description": "Sets the scheduling options for an Instance.", + "description": "Sets the scheduling options for an Instance. NextID: 9", "id": "Scheduling", "properties": { "automaticRestart": { @@ -34107,7 +39978,7 @@ "type": "boolean" }, "nodeAffinities": { - "description": "A set of node affinity and anti-affinity.", + "description": "A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information.", "items": { "$ref": "SchedulingNodeAffinity" }, @@ -34141,7 +40012,7 @@ "type": "string" }, "operator": { - "description": "Defines the operation of node selection.", + "description": "Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity.", "enum": [ "IN", "NOT_IN", @@ -34165,7 +40036,7 @@ "type": "object" }, "SecurityPolicy": { - "description": "A security policy is comprised of one or more rules. It can also be associated with one or more 'targets'. (== resource_for v1.securityPolicies ==) (== resource_for beta.securityPolicies ==)", + "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services that use load balancers can reference a Security Policy. For more information, read Cloud Armor Security Policy Concepts. 
(== resource_for v1.securityPolicies ==) (== resource_for beta.securityPolicies ==)", "id": "SecurityPolicy", "properties": { "creationTimestamp": { @@ -34423,6 +40294,25 @@ }, "type": "object" }, + "ServerBinding": { + "id": "ServerBinding", + "properties": { + "type": { + "enum": [ + "RESTART_NODE_ON_ANY_SERVER", + "RESTART_NODE_ON_MINIMAL_SERVERS", + "SERVER_BINDING_TYPE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "ServiceAccount": { "description": "A service account.", "id": "ServiceAccount", @@ -34466,7 +40356,7 @@ "properties": { "encryptionKey": { "$ref": "ShieldedInstanceIdentityEntry", - "description": "An Endorsement Key (EK) issued to the Shielded Instance's vTPM." + "description": "An Endorsement Key (EK) made by the RSA 2048 algorithm issued to the Shielded Instance's vTPM." }, "kind": { "default": "compute#shieldedInstanceIdentity", @@ -34475,7 +40365,7 @@ }, "signingKey": { "$ref": "ShieldedInstanceIdentityEntry", - "description": "An Attestation Key (AK) issued to the Shielded Instance's vTPM." + "description": "An Attestation Key (AK) made by the RSA 2048 algorithm issued to the Shielded Instance's vTPM." } }, "type": "object" @@ -34523,9 +40413,13 @@ "type": "object" }, "Snapshot": { - "description": "A persistent disk snapshot resource. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", + "description": "Represents a Persistent Disk Snapshot resource.\n\nYou can use snapshots to back up data on a regular interval. For more information, read Creating persistent disk snapshots. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", "id": "Snapshot", "properties": { + "autoCreated": { + "description": "[Output Only] Set to true if snapshots are automatically by applying resource policy on the target disk.", + "type": "boolean" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -34535,7 +40429,7 @@ "type": "string" }, "diskSizeGb": { - "description": "[Output Only] Size of the snapshot, specified in GB.", + "description": "[Output Only] Size of the source disk, specified in GB.", "format": "int64", "type": "string" }, @@ -34587,7 +40481,7 @@ }, "snapshotEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." + "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. 
For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the snapshot.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." }, "sourceDisk": { "description": "[Output Only] The source disk used to create this snapshot.", @@ -34637,7 +40531,7 @@ "type": "string" }, "storageLocations": { - "description": "GCS bucket storage location of the snapshot (regional or multi-regional).", + "description": "Cloud Storage bucket storage location of the snapshot (regional or multi-regional).", "items": { "type": "string" }, @@ -34773,7 +40667,7 @@ "type": "object" }, "SslCertificate": { - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", + "description": "Represents an SSL Certificate resource.\n\nThis SSL certificate resource also contains a private key. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and Using SSL Certificates. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", "id": "SslCertificate", "properties": { "certificate": { @@ -34807,6 +40701,10 @@ "description": "A write-only private key in PEM format. Only insert requests will include this field.", "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional SSL Certificate resides. This field is not applicable to global SSL Certificate.", + "type": "string" + }, "selfLink": { "description": "[Output only] Server-defined URL for the resource.", "type": "string" @@ -34814,6 +40712,118 @@ }, "type": "object" }, + "SslCertificateAggregatedList": { + "id": "SslCertificateAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "SslCertificatesScopedList", + "description": "Name of the scope containing this set of SslCertificates." + }, + "description": "A list of SslCertificatesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#sslCertificateAggregatedList", + "description": "[Output Only] Type of resource. Always compute#sslCertificateAggregatedList for lists of SSL Certificates.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "SslCertificateList": { "description": "Contains a list of SslCertificate resources.", "id": "SslCertificateList", @@ -34926,6 +40936,100 @@ }, "type": "object" }, + "SslCertificatesScopedList": { + "id": "SslCertificatesScopedList", + "properties": { + "sslCertificates": { + "description": "List of SslCertificates contained in this scope.", + "items": { + "$ref": "SslCertificate" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "SslPoliciesList": { "id": "SslPoliciesList", "properties": { @@ -35050,7 +41154,7 @@ "type": "object" }, "SslPolicy": { - "description": "A SSL policy specifies the server-side support for SSL features. This can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects connections between clients and the HTTPS or SSL proxy load balancer. They do not affect the connection between the load balancers and the backends.", + "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services used by HTTP or HTTPS load balancers can reference a Security Policy. For more information, read read Cloud Armor Security Policy Concepts. (== resource_for beta.sslPolicies ==) (== resource_for v1.sslPolicies ==)", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -35227,7 +41331,7 @@ "type": "object" }, "Subnetwork": { - "description": "A Subnetwork resource. (== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", + "description": "Represents a Subnetwork resource.\n\nA subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network. (== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", "id": "Subnetwork", "properties": { "creationTimestamp": { @@ -35265,6 +41369,10 @@ "description": "[Output Only] Type of the resource. 
Always compute#subnetwork for Subnetwork resources.", "type": "string" }, + "logConfig": { + "$ref": "SubnetworkLogConfig", + "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Stackdriver." + }, "name": { "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -35278,10 +41386,36 @@ "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess.", "type": "boolean" }, + "purpose": { + "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", + "PRIVATE_RFC_1918" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "region": { "description": "URL of the region where the Subnetwork resides. This field can be set only at resource creation time.", "type": "string" }, + "role": { + "description": "The role of subnetwork. Currenly, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "secondaryIpRanges": { "description": "An array of configurations for secondary IP ranges for VM instances contained in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange of the subnetwork. The alias IPs may belong to either primary or secondary ranges. This field can be updated with a patch request.", "items": { @@ -35292,6 +41426,18 @@ "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "state": { + "description": "[Output Only] The state of the subnetwork, which can be one of READY or DRAINING. A subnetwork that is READY is ready to be used. The state of DRAINING is only applicable to subnetworks that have the purpose set to INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the load balancer are being drained. 
A subnetwork that is draining cannot be used or modified until it reaches a status of READY.", + "enum": [ + "DRAINING", + "READY" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -35520,6 +41666,54 @@ }, "type": "object" }, + "SubnetworkLogConfig": { + "description": "The available logging options for this subnetwork.", + "id": "SubnetworkLogConfig", + "properties": { + "aggregationInterval": { + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection.", + "enum": [ + "INTERVAL_10_MIN", + "INTERVAL_15_MIN", + "INTERVAL_1_MIN", + "INTERVAL_30_SEC", + "INTERVAL_5_MIN", + "INTERVAL_5_SEC" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "enable": { + "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is to disable flow logging.", + "type": "boolean" + }, + "flowSampling": { + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5, which means half of all collected logs are reported.", + "format": "float", + "type": "number" + }, + "metadata": { + "description": "Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is INCLUDE_ALL_METADATA.", + "enum": [ + "EXCLUDE_ALL_METADATA", + "INCLUDE_ALL_METADATA" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "SubnetworkSecondaryRange": { "description": "Represents a secondary IP range of a subnetwork.", "id": "SubnetworkSecondaryRange", @@ -35716,8 +41910,102 @@ }, "type": "object" }, + "TargetHttpProxiesScopedList": { + "id": "TargetHttpProxiesScopedList", + "properties": { + "targetHttpProxies": { + "description": "A list of TargetHttpProxies contained in this scope.", + "items": { + "$ref": "TargetHttpProxy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TargetHttpProxy": { - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", + "description": "Represents a Target HTTP Proxy resource.\n\nA target HTTP proxy is a component of certain types of load balancers. Global forwarding rules reference a target HTTP proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", "id": "TargetHttpProxy", "properties": { "creationTimestamp": { @@ -35743,6 +42031,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional Target HTTP Proxy resides. This field is not applicable to global Target HTTP Proxies.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -35754,6 +42046,37 @@ }, "type": "object" }, + "TargetHttpProxyAggregatedList": { + "id": "TargetHttpProxyAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "TargetHttpProxiesScopedList", + "description": "Name of the scope containing this set of TargetHttpProxies." 
+ }, + "description": "A list of TargetHttpProxiesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#targetHttpProxyAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetHttpProxyAggregatedList for lists of Target HTTP Proxies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + } + }, + "type": "object" + }, "TargetHttpProxyList": { "description": "A list of TargetHttpProxy resources.", "id": "TargetHttpProxyList", @@ -35866,6 +42189,100 @@ }, "type": "object" }, + "TargetHttpsProxiesScopedList": { + "id": "TargetHttpsProxiesScopedList", + "properties": { + "targetHttpsProxies": { + "description": "A list of TargetHttpsProxies contained in this scope.", + "items": { + "$ref": "TargetHttpsProxy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TargetHttpsProxiesSetQuicOverrideRequest": { "id": "TargetHttpsProxiesSetQuicOverrideRequest", "properties": { @@ -35900,7 +42317,7 @@ "type": "object" }, "TargetHttpsProxy": { - "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", + "description": "Represents a Target HTTPS Proxy resource.\n\nA target HTTPS proxy is a component of certain types of load balancers. Global forwarding rules reference a target HTTPS proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies. (== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", "id": "TargetHttpsProxy", "properties": { "creationTimestamp": { @@ -35927,7 +42344,7 @@ "type": "string" }, "quicOverride": { - "description": "Specifies the QUIC override policy for this TargetHttpsProxy resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, Enables QUIC when set to ENABLE, and disables QUIC when set to DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Not specifying this field is equivalent to specifying NONE.", + "description": "Specifies the QUIC override policy for this TargetHttpsProxy resource. This setting determines whether the load balancer attempts to negotiate QUIC with clients. You can specify NONE, ENABLE, or DISABLE. \n- When quic-override is set to NONE, Google manages whether QUIC is used. \n- When quic-override is set to ENABLE, the load balancer uses QUIC when possible. \n- When quic-override is set to DISABLE, the load balancer doesn't use QUIC. \n- If the quic-override flag is not specified, NONE is implied.\n-", "enum": [ "DISABLE", "ENABLE", @@ -35940,6 +42357,10 @@ ], "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional TargetHttpsProxy resides. This field is not applicable to global TargetHttpsProxies.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -35952,7 +42373,7 @@ "type": "array" }, "sslPolicy": { - "description": "URL of SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured.", + "description": "URL of SslPolicy resource that will be associated with the TargetHttpsProxy resource. 
If not set, the TargetHttpsProxy resource has no SSL policy configured.", "type": "string" }, "urlMap": { @@ -35962,6 +42383,118 @@ }, "type": "object" }, + "TargetHttpsProxyAggregatedList": { + "id": "TargetHttpsProxyAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "TargetHttpsProxiesScopedList", + "description": "Name of the scope containing this set of TargetHttpsProxies." + }, + "description": "A list of TargetHttpsProxiesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#targetHttpsProxyAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetHttpsProxyAggregatedList for lists of Target HTTP Proxies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TargetHttpsProxyList": { "description": "Contains a list of TargetHttpsProxy resources.", "id": "TargetHttpsProxyList", @@ -36075,7 +42608,7 @@ "type": "object" }, "TargetInstance": { - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols. (== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", + "description": "Represents a Target Instance resource.\n\nYou can use a target instance to handle traffic for one or more forwarding rules, which is ideal for forwarding protocol traffic that is managed by a single source. For example, ESP, AH, TCP, or UDP. For more information, read Target instances. (== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", "id": "TargetInstance", "properties": { "creationTimestamp": { @@ -36445,7 +42978,7 @@ "type": "object" }, "TargetPool": { - "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", + "description": "Represents a Target Pool resource.\n\nTarget pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", "id": "TargetPool", "properties": { "backupPool": { @@ -36509,6 +43042,8 @@ "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", + "HEADER_FIELD", + "HTTP_COOKIE", "NONE" ], "enumDescriptions": [ @@ -36516,6 +43051,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -36961,7 +43498,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "A TargetSslProxy resource. This resource defines an SSL proxy. (== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", + "description": "Represents a Target SSL Proxy resource.\n\nA target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies. (== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", "id": "TargetSslProxy", "properties": { "creationTimestamp": { @@ -37162,7 +43699,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "A TargetTcpProxy resource. This resource defines a TCP proxy. (== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", + "description": "Represents a Target TCP Proxy resource.\n\nA target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. 
For more information, read TCP Proxy Load Balancing Concepts. (== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", "id": "TargetTcpProxy", "properties": { "creationTimestamp": { @@ -37324,7 +43861,7 @@ "type": "object" }, "TargetVpnGateway": { - "description": "Represents a Target VPN gateway resource. (== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", + "description": "Represents a Target VPN Gateway resource.\n\nThe target VPN gateway resource represents a Classic Cloud VPN gateway. For more information, read the the Cloud VPN Overview. (== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", "id": "TargetVpnGateway", "properties": { "creationTimestamp": { @@ -37768,17 +44305,25 @@ "type": "object" }, "UrlMap": { - "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", + "description": "Represents a URL Map resource.\n\nA URL map resource is a component of certain types of load balancers. This resource defines mappings from host names and URL paths to either a backend service or a backend bucket.\n\nTo use this resource, the backend service must have a loadBalancingScheme of either EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED For more information, read URL Map Concepts.", "id": "UrlMap", "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + }, "defaultService": { "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.", "type": "string" }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" @@ -37788,6 +44333,10 @@ "format": "byte", "type": "string" }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher." 
+ }, "hostRules": { "description": "The list of HostRules to use against the URL.", "items": { @@ -37817,6 +44366,10 @@ }, "type": "array" }, + "region": { + "description": "[Output Only] URL of the region where the regional URL map resides. This field is not applicable to global URL maps. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -38002,6 +44555,212 @@ }, "type": "object" }, + "UrlMapsAggregatedList": { + "id": "UrlMapsAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "UrlMapsScopedList", + "description": "Name of the scope containing this set of UrlMaps." + }, + "description": "A list of UrlMapsScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#urlMapsAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "UrlMapsScopedList": { + "id": "UrlMapsScopedList", + "properties": { + "urlMaps": { + "description": "A list of UrlMaps contained in this scope.", + "items": { + "$ref": "UrlMap" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "UrlMapsValidateRequest": { "id": "UrlMapsValidateRequest", "properties": { @@ -38021,6 +44780,21 @@ }, "type": "object" }, + "UrlRewrite": { + "description": "The spec for modifying the path before sending the request to the matched backend service.", + "id": "UrlRewrite", + "properties": { + "hostRewrite": { + "description": "Prior to forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite.\nThe value must be between 1 and 255 characters.", + "type": "string" + }, + "pathPrefixRewrite": { + "description": "Prior to forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite.\nThe value must be between 1 and 1024 characters.", + "type": "string" + } + }, + "type": "object" + }, "UsableSubnetwork": { "description": "Subnetwork which the current user has compute.subnetworks.use permission on.", "id": "UsableSubnetwork", @@ -38209,6 +44983,13 @@ "description": "Contain information of Nat mapping for an interface of this endpoint.", "id": "VmEndpointNatMappingsInterfaceNatMappings", "properties": { + "drainNatIpPortRanges": { + "description": "List of all drain IP:port-range mappings assigned to this interface. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", + "items": { + "type": "string" + }, + "type": "array" + }, "natIpPortRanges": { "description": "A list of all IP:port-range mappings assigned to this interface. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", "items": { @@ -38216,6 +44997,11 @@ }, "type": "array" }, + "numTotalDrainNatPorts": { + "description": "Total number of drain ports across all NAT IPs allocated to this interface. It equals to the aggregated port number in the field drain_nat_ip_port_ranges.", + "format": "int32", + "type": "integer" + }, "numTotalNatPorts": { "description": "Total number of ports across all NAT IPs allocated to this interface. It equals to the aggregated port number in the field nat_ip_port_ranges.", "format": "int32", @@ -38344,8 +45130,511 @@ }, "type": "object" }, + "VpnGateway": { + "description": "Represents a VPN gateway resource.", + "id": "VpnGateway", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#vpnGateway", + "description": "[Output Only] Type of resource. Always compute#vpnGateway for VPN gateways.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this VpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an VpnGateway.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this VpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "name": { + "annotations": { + "required": [ + "compute.vpnGateways.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "annotations": { + "required": [ + "compute.vpnGateways.insert" + ] + }, + "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the VPN gateway resides.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "vpnInterfaces": { + "description": "[Output Only] A list of interfaces on this VPN gateway.", + "items": { + "$ref": "VpnGatewayVpnGatewayInterface" + }, + "type": "array" + } + }, + "type": "object" + }, + "VpnGatewayAggregatedList": { + "id": "VpnGatewayAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "VpnGatewaysScopedList", + "description": "[Output Only] Name of the scope containing this set of VPN gateways." + }, + "description": "A list of VpnGateway resources.", + "type": "object" + }, + "kind": { + "default": "compute#vpnGatewayAggregatedList", + "description": "[Output Only] Type of resource. Always compute#vpnGateway for VPN gateways.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "VpnGatewayList": { + "description": "Contains a list of VpnGateway resources.", + "id": "VpnGatewayList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of VpnGateway resources.", + "items": { + "$ref": "VpnGateway" + }, + "type": "array" + }, + "kind": { + "default": "compute#vpnGatewayList", + "description": "[Output Only] Type of resource. Always compute#vpnGateway for VPN gateways.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "VpnGatewayStatus": { + "id": "VpnGatewayStatus", + "properties": { + "vpnConnections": { + "description": "List of VPN connection for this VpnGateway.", + "items": { + "$ref": "VpnGatewayStatusVpnConnection" + }, + "type": "array" + } + }, + "type": "object" + }, + "VpnGatewayStatusHighAvailabilityRequirementState": { + "description": "Describes the high availability requirement state for the VPN connection between this Cloud VPN gateway and a peer gateway.", + "id": "VpnGatewayStatusHighAvailabilityRequirementState", + "properties": { + "state": { + "description": "Indicates the high availability requirement state for the VPN connection. Valid values are CONNECTION_REDUNDANCY_MET, CONNECTION_REDUNDANCY_NOT_MET.", + "enum": [ + "CONNECTION_REDUNDANCY_MET", + "CONNECTION_REDUNDANCY_NOT_MET" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "unsatisfiedReason": { + "description": "Indicates the reason why the VPN connection does not meet the high availability redundancy criteria/requirement. 
Valid values is INCOMPLETE_TUNNELS_COVERAGE.", + "enum": [ + "INCOMPLETE_TUNNELS_COVERAGE" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "VpnGatewayStatusTunnel": { + "description": "Contains some information about a VPN tunnel.", + "id": "VpnGatewayStatusTunnel", + "properties": { + "localGatewayInterface": { + "description": "The VPN gateway interface this VPN tunnel is associated with.", + "format": "uint32", + "type": "integer" + }, + "peerGatewayInterface": { + "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", + "format": "uint32", + "type": "integer" + }, + "tunnelUrl": { + "description": "URL reference to the VPN tunnel.", + "type": "string" + } + }, + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { + "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { + "description": "URL reference to the peer external VPN gateways to which the VPN tunnels in this VPN connection are connected. This field is mutually exclusive with peer_gcp_gateway.", + "type": "string" + }, + "peerGcpGateway": { + "description": "URL reference to the peer side VPN gateways to which the VPN tunnels in this VPN connection are connected. This field is mutually exclusive with peer_gcp_gateway.", + "type": "string" + }, + "state": { + "$ref": "VpnGatewayStatusHighAvailabilityRequirementState", + "description": "HighAvailabilityRequirementState for the VPN connection." + }, + "tunnels": { + "description": "List of VPN tunnels that are in this VPN connection.", + "items": { + "$ref": "VpnGatewayStatusTunnel" + }, + "type": "array" + } + }, + "type": "object" + }, + "VpnGatewayVpnGatewayInterface": { + "description": "A VPN gateway interface.", + "id": "VpnGatewayVpnGatewayInterface", + "properties": { + "id": { + "description": "The numeric ID of this VPN gateway interface.", + "format": "uint32", + "type": "integer" + }, + "ipAddress": { + "description": "The external IP address for this VPN gateway interface.", + "type": "string" + } + }, + "type": "object" + }, + "VpnGatewaysGetStatusResponse": { + "id": "VpnGatewaysGetStatusResponse", + "properties": { + "result": { + "$ref": "VpnGatewayStatus" + } + }, + "type": "object" + }, + "VpnGatewaysScopedList": { + "id": "VpnGatewaysScopedList", + "properties": { + "vpnGateways": { + "description": "[Output Only] A list of VPN gateways contained in this scope.", + "items": { + "$ref": "VpnGateway" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "VpnTunnel": { - "description": "VPN tunnel resource. (== resource_for beta.vpnTunnels ==) (== resource_for v1.vpnTunnels ==)", + "description": "Represents a Cloud VPN Tunnel resource.\n\nFor more information about VPN, read the the Cloud VPN Overview. (== resource_for beta.vpnTunnels ==) (== resource_for v1.vpnTunnels ==)", "id": "VpnTunnel", "properties": { "creationTimestamp": { @@ -38392,6 +45681,19 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "peerExternalGateway": { + "description": "URL of the peer side external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field is exclusive with the field peerGcpGateway.", + "type": "string" + }, + "peerExternalGatewayInterface": { + "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created.", + "format": "int32", + "type": "integer" + }, + "peerGcpGateway": { + "description": "URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field can be used when creating highly available VPN from VPC network to VPC network, the field is exclusive with the field peerExternalGateway. 
If provided, the VPN tunnel will automatically use the same vpnGatewayInterface ID in the peer GCP VPN gateway.", + "type": "string" + }, "peerIp": { "description": "IP address of the peer VPN gateway. Only IPv4 is supported.", "type": "string" @@ -38424,7 +45726,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the VPN tunnel, which can be one of the following: \n- PROVISIONING: Resource is being allocated for the VPN tunnel. \n- WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and Route resources are needed to setup the VPN tunnel. \n- FIRST_HANDSHAKE: Successful first handshake with the peer VPN. \n- ESTABLISHED: Secure session is successfully established with the peer VPN. \n- NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS \n- AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). \n- NEGOTIATION_FAILURE: Handshake failed. \n- DEPROVISIONING: Resources are being deallocated for the VPN tunnel. \n- FAILED: Tunnel creation has failed and the tunnel is not ready to be used.", + "description": "[Output Only] The status of the VPN tunnel, which can be one of the following: \n- PROVISIONING: Resource is being allocated for the VPN tunnel. \n- WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and Route resources are needed to setup the VPN tunnel. \n- FIRST_HANDSHAKE: Successful first handshake with the peer VPN. \n- ESTABLISHED: Secure session is successfully established with the peer VPN. \n- NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS \n- AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). \n- NEGOTIATION_FAILURE: Handshake failed. \n- DEPROVISIONING: Resources are being deallocated for the VPN tunnel. \n- FAILED: Tunnel creation has failed and the tunnel is not ready to be used. \n- NO_INCOMING_PACKETS: No incoming packets from peer. \n- REJECTED: Tunnel configuration was rejected, can be result of being blacklisted. \n- ALLOCATING_RESOURCES: Cloud VPN is in the process of allocating all required resources. \n- STOPPED: Tunnel is stopped due to its Forwarding Rules being deleted for Classic VPN tunnels or the project is in frozen state. \n- PEER_IDENTITY_MISMATCH: Peer identity does not match peer IP, probably behind NAT. \n- TS_NARROWING_NOT_ALLOWED: Traffic selector narrowing not allowed for an HA-VPN tunnel.", "enum": [ "ALLOCATING_RESOURCES", "AUTHORIZATION_ERROR", @@ -38437,6 +45739,7 @@ "NO_INCOMING_PACKETS", "PROVISIONING", "REJECTED", + "STOPPED", "WAITING_FOR_FULL_CONFIG" ], "enumDescriptions": [ @@ -38451,6 +45754,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38458,6 +45762,15 @@ "targetVpnGateway": { "description": "URL of the Target VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", "type": "string" + }, + "vpnGateway": { + "description": "URL of the VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created. 
This must be used (instead of target_vpn_gateway) if a High Availability VPN gateway resource is created.", + "type": "string" + }, + "vpnGatewayInterface": { + "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated.", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -38780,6 +46093,26 @@ }, "type": "object" }, + "WeightedBackendService": { + "description": "In contrast to a single BackendService in HttpRouteAction to which all matching traffic is directed to, WeightedBackendService allows traffic to be split across multiple BackendServices. The volume of traffic for each BackendService is proportional to the weight specified in each WeightedBackendService", + "id": "WeightedBackendService", + "properties": { + "backendService": { + "description": "The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap." + }, + "weight": { + "description": "Specifies the fraction of traffic sent to backendService, computed as weight / (sum of all weightedBackendService weights in routeAction) .\nThe selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backendService, subsequent requests will be sent to the same backendService as determined by the BackendService's session affinity policy.\nThe value must be between 0 and 1000", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, "XpnHostList": { "id": "XpnHostList", "properties": { @@ -38915,7 +46248,7 @@ "type": "object" }, "Zone": { - "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==) Next ID: 17", + "description": "Represents a Zone resource.\n\nA zone is a deployment area. These deployment areas are subsets of a region. For example the zone us-east1-a is located in the us-east1 region. For more information, read Regions and Zones. (== resource_for beta.zones ==) (== resource_for v1.zones ==)", "id": "Zone", "properties": { "availableCpuPlatforms": { diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index a8d3b1a643..06da7dba93 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -53,8 +53,8 @@ import ( "strconv" "strings" - gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" ) @@ -76,7 +76,7 @@ var _ = context.Canceled const apiId = "compute:v1" const apiName = "compute" const apiVersion = "v1" -const basePath = "https://www.googleapis.com/compute/v1/projects/" +const basePath = "https://compute.googleapis.com/compute/v1/projects/" // OAuth2 scopes used by this API. 
const ( @@ -142,6 +142,7 @@ func New(client *http.Client) (*Service, error) { s.BackendServices = NewBackendServicesService(s) s.DiskTypes = NewDiskTypesService(s) s.Disks = NewDisksService(s) + s.ExternalVpnGateways = NewExternalVpnGatewaysService(s) s.Firewalls = NewFirewallsService(s) s.ForwardingRules = NewForwardingRulesService(s) s.GlobalAddresses = NewGlobalAddressesService(s) @@ -172,10 +173,17 @@ func New(client *http.Client) (*Service, error) { s.RegionCommitments = NewRegionCommitmentsService(s) s.RegionDiskTypes = NewRegionDiskTypesService(s) s.RegionDisks = NewRegionDisksService(s) + s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) s.RegionOperations = NewRegionOperationsService(s) + s.RegionSslCertificates = NewRegionSslCertificatesService(s) + s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) + s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s) + s.RegionUrlMaps = NewRegionUrlMapsService(s) s.Regions = NewRegionsService(s) + s.Reservations = NewReservationsService(s) + s.ResourcePolicies = NewResourcePoliciesService(s) s.Routers = NewRoutersService(s) s.Routes = NewRoutesService(s) s.SecurityPolicies = NewSecurityPoliciesService(s) @@ -191,6 +199,7 @@ func New(client *http.Client) (*Service, error) { s.TargetTcpProxies = NewTargetTcpProxiesService(s) s.TargetVpnGateways = NewTargetVpnGatewaysService(s) s.UrlMaps = NewUrlMapsService(s) + s.VpnGateways = NewVpnGatewaysService(s) s.VpnTunnels = NewVpnTunnelsService(s) s.ZoneOperations = NewZoneOperationsService(s) s.Zones = NewZonesService(s) @@ -216,6 +225,8 @@ type Service struct { Disks *DisksService + ExternalVpnGateways *ExternalVpnGatewaysService + Firewalls *FirewallsService ForwardingRules *ForwardingRulesService @@ -276,14 +287,28 @@ type Service struct { RegionDisks *RegionDisksService + RegionHealthChecks *RegionHealthChecksService + RegionInstanceGroupManagers *RegionInstanceGroupManagersService RegionInstanceGroups *RegionInstanceGroupsService RegionOperations *RegionOperationsService + RegionSslCertificates *RegionSslCertificatesService + + RegionTargetHttpProxies *RegionTargetHttpProxiesService + + RegionTargetHttpsProxies *RegionTargetHttpsProxiesService + + RegionUrlMaps *RegionUrlMapsService + Regions *RegionsService + Reservations *ReservationsService + + ResourcePolicies *ResourcePoliciesService + Routers *RoutersService Routes *RoutesService @@ -314,6 +339,8 @@ type Service struct { UrlMaps *UrlMapsService + VpnGateways *VpnGatewaysService + VpnTunnels *VpnTunnelsService ZoneOperations *ZoneOperationsService @@ -391,6 +418,15 @@ type DisksService struct { s *Service } +func NewExternalVpnGatewaysService(s *Service) *ExternalVpnGatewaysService { + rs := &ExternalVpnGatewaysService{s: s} + return rs +} + +type ExternalVpnGatewaysService struct { + s *Service +} + func NewFirewallsService(s *Service) *FirewallsService { rs := &FirewallsService{s: s} return rs @@ -661,6 +697,15 @@ type RegionDisksService struct { s *Service } +func NewRegionHealthChecksService(s *Service) *RegionHealthChecksService { + rs := &RegionHealthChecksService{s: s} + return rs +} + +type RegionHealthChecksService struct { + s *Service +} + func NewRegionInstanceGroupManagersService(s *Service) *RegionInstanceGroupManagersService { rs := &RegionInstanceGroupManagersService{s: s} return rs @@ -688,6 +733,42 @@ type RegionOperationsService struct { s *Service } +func 
NewRegionSslCertificatesService(s *Service) *RegionSslCertificatesService { + rs := &RegionSslCertificatesService{s: s} + return rs +} + +type RegionSslCertificatesService struct { + s *Service +} + +func NewRegionTargetHttpProxiesService(s *Service) *RegionTargetHttpProxiesService { + rs := &RegionTargetHttpProxiesService{s: s} + return rs +} + +type RegionTargetHttpProxiesService struct { + s *Service +} + +func NewRegionTargetHttpsProxiesService(s *Service) *RegionTargetHttpsProxiesService { + rs := &RegionTargetHttpsProxiesService{s: s} + return rs +} + +type RegionTargetHttpsProxiesService struct { + s *Service +} + +func NewRegionUrlMapsService(s *Service) *RegionUrlMapsService { + rs := &RegionUrlMapsService{s: s} + return rs +} + +type RegionUrlMapsService struct { + s *Service +} + func NewRegionsService(s *Service) *RegionsService { rs := &RegionsService{s: s} return rs @@ -697,6 +778,24 @@ type RegionsService struct { s *Service } +func NewReservationsService(s *Service) *ReservationsService { + rs := &ReservationsService{s: s} + return rs +} + +type ReservationsService struct { + s *Service +} + +func NewResourcePoliciesService(s *Service) *ResourcePoliciesService { + rs := &ResourcePoliciesService{s: s} + return rs +} + +type ResourcePoliciesService struct { + s *Service +} + func NewRoutersService(s *Service) *RoutersService { rs := &RoutersService{s: s} return rs @@ -832,6 +931,15 @@ type UrlMapsService struct { s *Service } +func NewVpnGatewaysService(s *Service) *VpnGatewaysService { + rs := &VpnGatewaysService{s: s} + return rs +} + +type VpnGatewaysService struct { + s *Service +} + func NewVpnTunnelsService(s *Service) *VpnTunnelsService { rs := &VpnTunnelsService{s: s} return rs @@ -898,7 +1006,12 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AcceleratorType: An Accelerator Type resource. (== resource_for +// AcceleratorType: Represents an Accelerator Type resource. +// +// Google Cloud Platform provides graphics processing units +// (accelerators) that you can add to VM instances to improve or +// accelerate performance when working with intensive workloads. For +// more information, read GPUs on Compute Engine. (== resource_for // beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==) type AcceleratorType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -1421,8 +1534,8 @@ type AccessConfig struct { Kind string `json:"kind,omitempty"` // Name: The name of this access configuration. The default and - // recommended name is External NAT but you can use any arbitrary string - // you would like. For example, My external IP or Network Access. + // recommended name is External NAT, but you can use any arbitrary + // string, such as My external IP or Network Access. Name string `json:"name,omitempty"` // NatIP: An external IP address associated with this instance. Specify @@ -1449,10 +1562,10 @@ type AccessConfig struct { NetworkTier string `json:"networkTier,omitempty"` // PublicPtrDomainName: The DNS domain name for the public PTR record. - // This field can only be set when the set_public_ptr field is enabled. + // You can set this field only if the `setPublicPtr` field is enabled. PublicPtrDomainName string `json:"publicPtrDomainName,omitempty"` - // SetPublicPtr: Specifies whether a public DNS ?PTR? 
record should be + // SetPublicPtr: Specifies whether a public DNS 'PTR' record should be // created to map the external IP address of the instance to a DNS // domain name. SetPublicPtr bool `json:"setPublicPtr,omitempty"` @@ -1487,9 +1600,25 @@ func (s *AccessConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Address: A reserved address resource. (== resource_for beta.addresses -// ==) (== resource_for v1.addresses ==) (== resource_for -// beta.globalAddresses ==) (== resource_for v1.globalAddresses ==) +// Address: Represents an IP Address resource. +// +// An address resource represents a regional internal IP address. +// Regional internal IP addresses are RFC 1918 addresses that come from +// either a primary or secondary IP range of a subnet in a VPC network. +// Regional external IP addresses can be assigned to GCP VM instances, +// Cloud VPN gateways, regional external forwarding rules for network +// load balancers (in either Standard or Premium Tier), and regional +// external forwarding rules for HTTP(S), SSL Proxy, and TCP Proxy load +// balancers in Standard Tier. For more information, read IP +// addresses. +// +// A globalAddresses resource represent a global external IP address. +// Global external IP addresses are IPv4 or IPv6 addresses. They can +// only be assigned to global forwarding rules for HTTP(S), SSL Proxy, +// or TCP Proxy load balancers in Premium Tier. For more information, +// read Global resources. (== resource_for beta.addresses ==) (== +// resource_for v1.addresses ==) (== resource_for beta.globalAddresses +// ==) (== resource_for v1.globalAddresses ==) type Address struct { // Address: The static IP address represented by this resource. Address string `json:"address,omitempty"` @@ -1508,14 +1637,14 @@ type Address struct { CreationTimestamp string `json:"creationTimestamp,omitempty"` // Description: An optional description of this resource. Provide this - // property when you create the resource. + // field when you create the resource. Description string `json:"description,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // IpVersion: The IP Version that will be used by this address. Valid + // IpVersion: The IP version that will be used by this address. Valid // options are IPV4 or IPV6. This can only be specified for a global // address. // @@ -1532,23 +1661,24 @@ type Address struct { // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first + // character must be a lowercase letter, and all following characters + // (except for the last character) must be a dash, lowercase letter, or + // digit. The last character must be a lowercase letter or digit. Name string `json:"name,omitempty"` // Network: The URL of the network in which to reserve the address. This - // field can only be used with INTERNAL type with VPC_PEERING purpose. 
+ // field can only be used with INTERNAL type with the VPC_PEERING + // purpose. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring - // this Address and can only take the following values: PREMIUM, + // this address and can only take the following values: PREMIUM or // STANDARD. Global forwarding rules can only be Premium Tier. Regional // forwarding rules can be either Premium or Standard Tier. Standard // Tier addresses applied to regional forwarding rules can be used with // any external load balancer. Regional forwarding rules in Premium Tier - // can only be used with a Network load balancer. + // can only be used with a network load balancer. // // If this field is not specified, it is assumed to be PREMIUM. // @@ -1561,7 +1691,15 @@ type Address struct { // range. PrefixLength int64 `json:"prefixLength,omitempty"` - // Purpose: The purpose of resource, only used with INTERNAL type. + // Purpose: The purpose of this resource, which can be one of the + // following values: + // - `GCE_ENDPOINT` for addresses that are used by VM instances, alias + // IP ranges, internal load balancers, and similar resources. + // - `DNS_RESOLVER` for a DNS resolver address in a subnetwork + // - `VPC_PEERING` for addresses that are reserved for VPC peer + // networks. + // - `NAT_AUTO` for addresses that are external IP addresses + // automatically reserved for Cloud NAT. // // Possible values: // "DNS_RESOLVER" @@ -1570,10 +1708,9 @@ type Address struct { // "VPC_PEERING" Purpose string `json:"purpose,omitempty"` - // Region: [Output Only] URL of the region where the regional address - // resides. This field is not applicable to global addresses. You must - // specify this field as part of the HTTP request URL. You cannot set - // this field in the request body. + // Region: [Output Only] The URL of the region where the regional + // address resides. This field is not applicable to global addresses. + // You must specify this field as part of the HTTP request URL. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -1594,7 +1731,7 @@ type Address struct { // Subnetwork: The URL of the subnetwork in which to reserve the // address. If an IP address is specified, it must be within the // subnetwork's IP range. This field can only be used with INTERNAL type - // with GCE_ENDPOINT/DNS_RESOLVER purposes. + // with a GCE_ENDPOINT or DNS_RESOLVER purpose. Subnetwork string `json:"subnetwork,omitempty"` // Users: [Output Only] The URLs of the resources that are using this @@ -2074,17 +2211,17 @@ func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { // AliasIpRange: An alias IP range attached to an instance's network // interface. type AliasIpRange struct { - // IpCidrRange: The IP CIDR range represented by this alias IP range. - // This IP CIDR range must belong to the specified subnetwork and cannot + // IpCidrRange: The IP alias ranges to allocate for this interface. This + // IP CIDR range must belong to the specified subnetwork and cannot // contain IP addresses reserved by system or used by other network - // interfaces. This range may be a single IP address (e.g. 10.2.3.4), a - // netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24). + // interfaces. This range may be a single IP address (such as 10.2.3.4), + // a netmask (such as /24) or a CIDR-formatted string (such as + // 10.1.2.0/24). 
IpCidrRange string `json:"ipCidrRange,omitempty"` - // SubnetworkRangeName: Optional subnetwork secondary range name - // specifying the secondary range from which to allocate the IP CIDR - // range for this alias IP range. If left unspecified, the primary range - // of the subnetwork will be used. + // SubnetworkRangeName: The name of a subnetwork secondary IP range from + // which to allocate an IP alias range. If not specified, the primary + // range of the subnetwork is used. SubnetworkRangeName string `json:"subnetworkRangeName,omitempty"` // ForceSendFields is a list of field names (e.g. "IpCidrRange") to @@ -2110,6 +2247,121 @@ func (s *AliasIpRange) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk struct { + // DiskSizeGb: Specifies the size of the disk in base-2 GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + + // Interface: Specifies the disk interface to use for attaching this + // disk, which is either SCSI or NVME. The default is SCSI. For + // performance characteristics of SCSI over NVMe, see Local SSD + // performance. + // + // Possible values: + // "NVME" + // "SCSI" + Interface string `json:"interface,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskSizeGb") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AllocationSpecificSKUAllocationReservedInstanceProperties: Properties +// of the SKU instances being reserved. +type AllocationSpecificSKUAllocationReservedInstanceProperties struct { + // GuestAccelerators: Specifies accelerator type and count. + GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` + + // LocalSsds: Specifies amount of local ssd to reserve with each + // instance. The type of disk is local-ssd. + LocalSsds []*AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk `json:"localSsds,omitempty"` + + // MachineType: Specifies type of machine (name only) which has fixed + // number of vCPUs and fixed amount of memory. This also includes + // specifying custom machine type following + // custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. + MachineType string `json:"machineType,omitempty"` + + // MinCpuPlatform: Minimum cpu platform the reservation. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"GuestAccelerators") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GuestAccelerators") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocationSpecificSKUAllocationReservedInstanceProperties) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUAllocationReservedInstanceProperties + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AllocationSpecificSKUReservation: This reservation type allows to pre +// allocate specific instance configuration. +type AllocationSpecificSKUReservation struct { + // Count: Specifies the number of resources that are allocated. + Count int64 `json:"count,omitempty,string"` + + // InUseCount: [Output Only] Indicates how many instances are in use. + InUseCount int64 `json:"inUseCount,omitempty,string"` + + // InstanceProperties: The instance properties for the reservation. + InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Count") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AllocationSpecificSKUReservation) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUReservation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AttachedDisk: An instance-attached disk resource. type AttachedDisk struct { // AutoDelete: Specifies whether the disk will be auto-deleted when the @@ -2180,7 +2432,8 @@ type AttachedDisk struct { // disks must always use SCSI and the request will fail if you attempt // to attach a persistent disk in any other format than SCSI. Local SSDs // can use either NVME or SCSI. For performance characteristics of SCSI - // over NVMe, see Local SSD performance. + // over NVMe, see Local SSD performance. TODO(b/131765817): Update + // documentation when NVME is supported. 
// // Possible values: // "NVME" @@ -2205,8 +2458,8 @@ type AttachedDisk struct { // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. When creating a new instance, one of - // initializeParams.sourceImage or disks.source is required except for - // local SSD. + // initializeParams.sourceImage or initializeParams.sourceSnapshot or + // disks.source is required except for local SSD. // // If desired, you can also attach existing non-root persistent disks // using this property. This field is only applicable for persistent @@ -2265,7 +2518,9 @@ type AttachedDiskInitializeParams struct { // automatically generated. DiskName string `json:"diskName,omitempty"` - // DiskSizeGb: Specifies the size of the disk in base-2 GB. + // DiskSizeGb: Specifies the size of the disk in base-2 GB. If not + // specified, the disk will be the same size as the image (usually + // 10GB). If specified, the size must be equal to or larger than 10GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` // DiskType: Specifies the disk type to use to create the instance. If @@ -2291,9 +2546,15 @@ type AttachedDiskInitializeParams struct { // persistent disks. Labels map[string]string `json:"labels,omitempty"` + // ResourcePolicies: Resource policies applied to this disk for + // automatic snapshot creations. Specified using the full or partial + // URL. For instance template, specify only the resource policy name. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // SourceImage: The source image to create this disk. When creating a - // new instance, one of initializeParams.sourceImage or disks.source is - // required except for local SSD. + // new instance, one of initializeParams.sourceImage or + // initializeParams.sourceSnapshot or disks.source is required except + // for local SSD. // // To create a disk with one of the public operating system images, // specify the image by its family name. For example, specify @@ -2334,6 +2595,24 @@ type AttachedDiskInitializeParams struct { // the source images are encrypted with your own keys. SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + // SourceSnapshot: The source snapshot to create this disk. When + // creating a new instance, one of initializeParams.sourceSnapshot or + // initializeParams.sourceImage or disks.source is required except for + // local SSD. + // + // To create a disk with a snapshot that you created, specify the + // snapshot name in the following + // format: + // global/snapshots/my-backup + // + // + // If the source snapshot is deleted later, this field will not be set. + SourceSnapshot string `json:"sourceSnapshot,omitempty"` + + // SourceSnapshotEncryptionKey: The customer-supplied encryption key of + // the source snapshot. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -2371,15 +2650,15 @@ func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { // // { "audit_configs": [ { "service": "allServices" "audit_log_configs": // [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:foo@gmail.com" ] }, { "log_type": "DATA_WRITE", }, { +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE", }, { // "log_type": "ADMIN_READ", } ] }, { "service": -// "fooservice.googleapis.com" "audit_log_configs": [ { "log_type": +// "sampleservice.googleapis.com" "audit_log_configs": [ { "log_type": // "DATA_READ", }, { "log_type": "DATA_WRITE", "exempted_members": [ -// "user:bar@gmail.com" ] } ] } ] } +// "user:aliya@example.com" ] } ] } ] } // -// For fooservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ logging. It also exempts foo@gmail.com from DATA_READ -// logging, and bar@gmail.com from DATA_WRITE logging. +// For sampleservice, this policy enables DATA_READ, DATA_WRITE and +// ADMIN_READ logging. It also exempts jose@example.com from DATA_READ +// logging, and aliya@example.com from DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. @@ -2420,17 +2699,19 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { // permissions. Example: // // { "audit_log_configs": [ { "log_type": "DATA_READ", -// "exempted_members": [ "user:foo@gmail.com" ] }, { "log_type": +// "exempted_members": [ "user:jose@example.com" ] }, { "log_type": // "DATA_WRITE", } ] } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting -// foo@gmail.com from DATA_READ logging. +// jose@example.com from DATA_READ logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging // for this type of permission. Follows the same format of // [Binding.members][]. ExemptedMembers []string `json:"exemptedMembers,omitempty"` + IgnoreChildExemptions bool `json:"ignoreChildExemptions,omitempty"` + // LogType: The log type that this config enables. // // Possible values: @@ -2501,13 +2782,21 @@ func (s *AuthorizationLoggingOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Autoscaler: Represents an Autoscaler resource. Autoscalers allow you -// to automatically scale virtual machine instances in managed instance -// groups according to an autoscaling policy that you define. For more -// information, read Autoscaling Groups of Instances. (== resource_for -// beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== -// resource_for beta.regionAutoscalers ==) (== resource_for -// v1.regionAutoscalers ==) +// Autoscaler: Represents an Autoscaler resource. +// +// +// +// Use autoscalers to automatically add or delete instances from a +// managed instance group according to your defined autoscaling policy. +// For more information, read Autoscaling Groups of Instances. +// +// For zonal managed instance groups resource, use the autoscaler +// resource. +// +// For regional managed instance groups, use the regionAutoscalers +// resource. (== resource_for beta.autoscalers ==) (== resource_for +// v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== +// resource_for v1.regionAutoscalers ==) type Autoscaler struct { // AutoscalingPolicy: The configuration parameters for the autoscaling // algorithm. 
You can define one or more of the policies for an @@ -2551,6 +2840,13 @@ type Autoscaler struct { SelfLink string `json:"selfLink,omitempty"` // Status: [Output Only] The status of the autoscaler configuration. + // Current set of possible values: PENDING: Autoscaler backend hasn't + // read new/updated configuration DELETING: Configuration is being + // deleted ACTIVE: Configuration is acknowledged to be effective. Some + // warnings might or might not be present in the status_details field. + // ERROR: Configuration has errors. Actionable for users. Details are + // present in the status_details field. New values might be added in the + // future. // // Possible values: // "ACTIVE" @@ -2914,7 +3210,42 @@ type AutoscalerStatusDetails struct { // Message: The status message. Message string `json:"message,omitempty"` - // Type: The type of error returned. + // Type: The type of error, warning or notice returned. Current set of + // possible values: ALL_INSTANCES_UNHEALTHY (WARNING): All instances in + // the instance group are unhealthy (not in RUNNING state). + // BACKEND_SERVICE_DOES_NOT_EXIST (ERROR): There is no backend service + // attached to the instance group. CAPPED_AT_MAX_NUM_REPLICAS (WARNING): + // Autoscaler recommends size bigger than maxNumReplicas. + // CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE (WARNING): The custom metric + // samples are not exported often enough to be a credible base for + // autoscaling. CUSTOM_METRIC_INVALID (ERROR): The custom metric that + // was specified does not exist or does not have the necessary labels. + // MIN_EQUALS_MAX (WARNING): The minNumReplicas is equal to + // maxNumReplicas. This means the autoscaler cannot add or remove + // instances from the instance group. MISSING_CUSTOM_METRIC_DATA_POINTS + // (WARNING): The autoscaler did not receive any data from the custom + // metric configured for autoscaling. MISSING_LOAD_BALANCING_DATA_POINTS + // (WARNING): The autoscaler is configured to scale based on a load + // balancing signal but the instance group has not received any requests + // from the load balancer. MODE_OFF (WARNING): Autoscaling is turned + // off. The number of instances in the group won't change automatically. + // The autoscaling configuration is preserved. MODE_ONLY_UP (WARNING): + // Autoscaling is in the "Autoscale only up" mode. Instances in the + // group will be only added. MORE_THAN_ONE_BACKEND_SERVICE (ERROR): The + // instance group cannot be autoscaled because it has more than one + // backend service attached to it. NOT_ENOUGH_QUOTA_AVAILABLE (ERROR): + // Exceeded quota for necessary resources, such as CPU, number of + // instances and so on. REGION_RESOURCE_STOCKOUT (ERROR): Showed only + // for regional autoscalers: there is a resource stockout in the chosen + // region. SCALING_TARGET_DOES_NOT_EXIST (ERROR): The target to be + // scaled does not exist. + // UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION (ERROR): + // Autoscaling does not work with an HTTP/S load balancer that has been + // configured for maxRate. ZONE_RESOURCE_STOCKOUT (ERROR): For zonal + // autoscalers: there is a resource stockout in the chosen zone. For + // regional autoscalers: in at least one of the zones you're using there + // is a resource stockout. New values might be added in the future. Some + // of the values might not be available in all API versions. 
// // Possible values: // "ALL_INSTANCES_UNHEALTHY" @@ -2925,6 +3256,7 @@ type AutoscalerStatusDetails struct { // "MIN_EQUALS_MAX" // "MISSING_CUSTOM_METRIC_DATA_POINTS" // "MISSING_LOAD_BALANCING_DATA_POINTS" + // "MODE_OFF" // "MORE_THAN_ONE_BACKEND_SERVICE" // "NOT_ENOUGH_QUOTA_AVAILABLE" // "REGION_RESOURCE_STOCKOUT" @@ -3326,13 +3658,40 @@ func (s *AutoscalingPolicyLoadBalancingUtilization) UnmarshalJSON(data []byte) e // Backend: Message containing information of one individual backend. type Backend struct { - // BalancingMode: Specifies the balancing mode for this backend. For - // global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. - // Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for - // TCP/SSL). + // BalancingMode: Specifies the balancing mode for the backend. // - // For Internal Load Balancing, the default and only supported mode is - // CONNECTION. + // When choosing a balancing mode, you need to consider the + // loadBalancingScheme, and protocol for the backend service, as well as + // the type of backend (instance group or NEG). + // + // + // - If the load balancing mode is CONNECTION, then the load is spread + // based on how many concurrent connections the backend can handle. + // You can use the CONNECTION balancing mode if the protocol for the + // backend service is SSL, TCP, or UDP. + // + // If the loadBalancingScheme for the backend service is EXTERNAL (SSL + // Proxy and TCP Proxy load balancers), you must also specify exactly + // one of the following parameters: maxConnections, + // maxConnectionsPerInstance, or maxConnectionsPerEndpoint. + // + // If the loadBalancingScheme for the backend service is INTERNAL + // (internal TCP/UDP load balancers), you cannot specify any additional + // parameters. + // + // - If the load balancing mode is RATE, the load is spread based on the + // rate of HTTP requests per second (RPS). + // You can use the RATE balancing mode if the protocol for the backend + // service is HTTP or HTTPS. You must specify exactly one of the + // following parameters: maxRate, maxRatePerInstance, or + // maxRatePerEndpoint. + // + // - If the load balancing mode is UTILIZATION, the load is spread based + // on the CPU utilization of instances in an instance group. + // You can use the UTILIZATION balancing mode if the loadBalancingScheme + // of the backend service is EXTERNAL, INTERNAL_SELF_MANAGED, or + // INTERNAL_MANAGED and the backends are instance groups. There are no + // restrictions on the backend service protocol. // // Possible values: // "CONNECTION" @@ -3354,54 +3713,72 @@ type Backend struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Group: The fully-qualified URL of an Instance Group or Network - // Endpoint Group resource. In case of instance group this defines the - // list of instances that serve traffic. Member virtual machine - // instances from each instance group must live in the same zone as the - // instance group itself. No two backends in a backend service are - // allowed to use same Instance Group resource. + // Group: The fully-qualified URL of an instance group or network + // endpoint group (NEG) resource. The type of backend that a backend + // service supports depends on the backend service's + // loadBalancingScheme. // - // For Network Endpoint Groups this defines list of endpoints. All - // endpoints of Network Endpoint Group must be hosted on instances - // located in the same zone as the Network Endpoint Group. 
// - // Backend service can not contain mix of Instance Group and Network - // Endpoint Group backends. + // - When the loadBalancingScheme for the backend service is EXTERNAL, + // INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, the backend can be either + // an instance group or a NEG. The backends on the backend service must + // be either all instance groups or all NEGs. You cannot mix instance + // group and NEG backends on the same backend service. // - // Note that you must specify an Instance Group or Network Endpoint - // Group resource using the fully-qualified URL, rather than a partial - // URL. // - // When the BackendService has load balancing scheme INTERNAL, the - // instance group must be within the same region as the BackendService. - // Network Endpoint Groups are not supported for INTERNAL load balancing - // scheme. + // - When the loadBalancingScheme for the backend service is INTERNAL, + // the backend must be an instance group in the same region as the + // backend service. NEGs are not supported. + // + // You must use the fully-qualified URL (starting with + // https://www.googleapis.com/) to specify the instance group or NEG. + // Partial URLs are not supported. Group string `json:"group,omitempty"` - // MaxConnections: The max number of simultaneous connections for the - // group. Can be used with either CONNECTION or UTILIZATION balancing - // modes. For CONNECTION mode, either maxConnections or - // maxConnectionsPerInstance must be set. + // MaxConnections: Defines a maximum target for simultaneous connections + // for the entire backend (instance group or NEG). If the backend's + // balancingMode is UTILIZATION, this is an optional parameter. If the + // backend's balancingMode is CONNECTION, and backend is attached to a + // backend service whose loadBalancingScheme is EXTERNAL, you must + // specify either this parameter, maxConnectionsPerInstance, or + // maxConnectionsPerEndpoint. // - // This cannot be used for internal load balancing. + // Not available if the backend's balancingMode is RATE. If the + // loadBalancingScheme is INTERNAL, then maxConnections is not + // supported, even though the backend requires a balancing mode of + // CONNECTION. MaxConnections int64 `json:"maxConnections,omitempty"` - // MaxConnectionsPerEndpoint: The max number of simultaneous connections - // that a single backend network endpoint can handle. This is used to - // calculate the capacity of the group. Can be used in either CONNECTION - // or UTILIZATION balancing modes. For CONNECTION mode, either - // maxConnections or maxConnectionsPerEndpoint must be set. + // MaxConnectionsPerEndpoint: Defines a maximum target for simultaneous + // connections for an endpoint of a NEG. This is multiplied by the + // number of endpoints in the NEG to implicitly calculate a maximum + // number of target maximum simultaneous connections for the NEG. If the + // backend's balancingMode is CONNECTION, and the backend is attached to + // a backend service whose loadBalancingScheme is EXTERNAL, you must + // specify either this parameter, maxConnections, or + // maxConnectionsPerInstance. // - // This cannot be used for internal load balancing. + // Not available if the backend's balancingMode is RATE. Internal + // TCP/UDP load balancing does not support setting + // maxConnectionsPerEndpoint even though its backends require a + // balancing mode of CONNECTION. 
MaxConnectionsPerEndpoint int64 `json:"maxConnectionsPerEndpoint,omitempty"` - // MaxConnectionsPerInstance: The max number of simultaneous connections - // that a single backend instance can handle. This is used to calculate - // the capacity of the group. Can be used in either CONNECTION or - // UTILIZATION balancing modes. For CONNECTION mode, either - // maxConnections or maxConnectionsPerInstance must be set. + // MaxConnectionsPerInstance: Defines a maximum target for simultaneous + // connections for a single VM in a backend instance group. This is + // multiplied by the number of instances in the instance group to + // implicitly calculate a target maximum number of simultaneous + // connections for the whole instance group. If the backend's + // balancingMode is UTILIZATION, this is an optional parameter. If the + // backend's balancingMode is CONNECTION, and backend is attached to a + // backend service whose loadBalancingScheme is EXTERNAL, you must + // specify either this parameter, maxConnections, or + // maxConnectionsPerEndpoint. // - // This cannot be used for internal load balancing. + // Not available if the backend's balancingMode is RATE. Internal + // TCP/UDP load balancing does not support setting + // maxConnectionsPerInstance even though its backends require a + // balancing mode of CONNECTION. MaxConnectionsPerInstance int64 `json:"maxConnectionsPerInstance,omitempty"` // MaxRate: The max requests per second (RPS) of the group. Can be used @@ -3412,27 +3789,36 @@ type Backend struct { // This cannot be used for internal load balancing. MaxRate int64 `json:"maxRate,omitempty"` - // MaxRatePerEndpoint: The max requests per second (RPS) that a single - // backend network endpoint can handle. This is used to calculate the - // capacity of the group. Can be used in either balancing mode. For RATE - // mode, either maxRate or maxRatePerEndpoint must be set. + // MaxRatePerEndpoint: Defines a maximum target for requests per second + // (RPS) for an endpoint of a NEG. This is multiplied by the number of + // endpoints in the NEG to implicitly calculate a target maximum rate + // for the NEG. // - // This cannot be used for internal load balancing. + // If the backend's balancingMode is RATE, you must specify either this + // parameter, maxRate, or maxRatePerInstance. + // + // Not available if the backend's balancingMode is CONNECTION. MaxRatePerEndpoint float64 `json:"maxRatePerEndpoint,omitempty"` - // MaxRatePerInstance: The max requests per second (RPS) that a single - // backend instance can handle. This is used to calculate the capacity - // of the group. Can be used in either balancing mode. For RATE mode, - // either maxRate or maxRatePerInstance must be set. + // MaxRatePerInstance: Defines a maximum target for requests per second + // (RPS) for a single VM in a backend instance group. This is multiplied + // by the number of instances in the instance group to implicitly + // calculate a target maximum rate for the whole instance group. // - // This cannot be used for internal load balancing. + // If the backend's balancingMode is UTILIZATION, this is an optional + // parameter. If the backend's balancingMode is RATE, you must specify + // either this parameter, maxRate, or maxRatePerEndpoint. + // + // Not available if the backend's balancingMode is CONNECTION. MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` - // MaxUtilization: Used when balancingMode is UTILIZATION. This ratio - // defines the CPU utilization target for the group. 
The default is 0.8. - // Valid range is [0.0, 1.0]. + // MaxUtilization: Defines the maximum average CPU utilization of a + // backend VM in an instance group. The valid range is [0.0, 1.0]. This + // is an optional parameter if the backend's balancingMode is + // UTILIZATION. // - // This cannot be used for internal load balancing. + // This parameter can be used in conjunction with maxRate, + // maxRatePerInstance, maxConnections, or maxConnectionsPerInstance. MaxUtilization float64 `json:"maxUtilization,omitempty"` // ForceSendFields is a list of field names (e.g. "BalancingMode") to @@ -3478,8 +3864,10 @@ func (s *Backend) UnmarshalJSON(data []byte) error { return nil } -// BackendBucket: A BackendBucket resource. This resource defines a -// Cloud Storage bucket. +// BackendBucket: Represents a Cloud Storage Bucket resource. +// +// This Cloud Storage bucket resource is referenced by a URL map of a +// load balancer. For more information, read Backend Buckets. type BackendBucket struct { // BucketName: Cloud Storage bucket name. BucketName string `json:"bucketName,omitempty"` @@ -3741,17 +4129,19 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BackendService: A BackendService resource. This resource defines a -// group of backend virtual machines and their serving capacity. (== -// resource_for v1.backendService ==) (== resource_for +// BackendService: Represents a Backend Service resource. +// +// A backend service contains configuration values for Google Cloud +// Platform load balancing services. +// +// For more information, read Backend Services. +// +// (== resource_for v1.backendService ==) (== resource_for // beta.backendService ==) type BackendService struct { - // AffinityCookieTtlSec: Lifetime of cookies in seconds if - // session_affinity is GENERATED_COOKIE. If set to 0, the cookie is - // non-persistent and lasts only until the end of the browser session - // (or equivalent). The maximum allowed value for TTL is one day. - // - // When the load balancing scheme is INTERNAL, this field is not used. + // AffinityCookieTtlSec: If set to 0, the cookie is non-persistent and + // lasts only until the end of the browser session (or equivalent). The + // maximum allowed value is one day (86,400). AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -3760,8 +4150,37 @@ type BackendService struct { // CdnPolicy: Cloud CDN configuration for this BackendService. CdnPolicy *BackendServiceCdnPolicy `json:"cdnPolicy,omitempty"` + // CircuitBreakers: Settings controlling the volume of connections to a + // backend service. If not set, this feature is considered + // disabled. + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` + ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` + // ConsistentHash: Consistent Hash-based load balancing can be used to + // provide soft session affinity based on HTTP headers, cookies or other + // properties. This load balancing policy is applicable only for HTTP + // connections. 
The affinity to a particular destination host will be + // lost when one or more hosts are added/removed from the destination + // service. This field specifies parameters that control consistent + // hashing. This field is only applicable when localityLbPolicy is set + // to MAGLEV or RING_HASH. + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + ConsistentHash *ConsistentHashLoadBalancerSettings `json:"consistentHash,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -3774,9 +4193,9 @@ type BackendService struct { // property when you create the resource. Description string `json:"description,omitempty"` - // EnableCDN: If true, enable Cloud CDN for this BackendService. - // - // When the load balancing scheme is INTERNAL, this field is not used. + // EnableCDN: If true, enables Cloud CDN for the backend service. Only + // applicable if the loadBalancingScheme is EXTERNAL and the protocol is + // HTTP or HTTPS. EnableCDN bool `json:"enableCDN,omitempty"` // Fingerprint: Fingerprint of this resource. A hash of the contents @@ -3810,17 +4229,60 @@ type BackendService struct { // for backend services. Kind string `json:"kind,omitempty"` - // LoadBalancingScheme: Indicates whether the backend service will be - // used with internal or external load balancing. A backend service - // created for one type of load balancing cannot be used with the other. - // Possible values are INTERNAL and EXTERNAL. + // LoadBalancingScheme: Specifies the load balancer type. Choose + // EXTERNAL for load balancers that receive traffic from external + // clients. Choose INTERNAL for Internal TCP/UDP Load Balancing. Choose + // INTERNAL_MANAGED for Internal HTTP(S) Load Balancing. Choose + // INTERNAL_SELF_MANAGED for Traffic Director. A backend service created + // for one type of load balancing cannot be used with another. For more + // information, refer to Choosing a load balancer. // // Possible values: // "EXTERNAL" // "INTERNAL" + // "INTERNAL_MANAGED" + // "INTERNAL_SELF_MANAGED" // "INVALID_LOAD_BALANCING_SCHEME" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // LocalityLbPolicy: The load balancing algorithm used within the scope + // of the locality. The possible values are: + // - ROUND_ROBIN: This is a simple policy in which each healthy backend + // is selected in round robin order. This is the default. + // - LEAST_REQUEST: An O(1) algorithm which selects two random healthy + // hosts and picks the host which has fewer active requests. + // - RING_HASH: The ring/modulo hash load balancer implements consistent + // hashing to backends. The algorithm has the property that the + // addition/removal of a host from a set of N hosts only affects 1/N of + // the requests. + // - RANDOM: The load balancer selects a random healthy host. + // - ORIGINAL_DESTINATION: Backend host is selected based on the client + // connection metadata, i.e., connections are opened to the same address + // as the destination address of the incoming connection before the + // connection was redirected to the load balancer. + // - MAGLEV: used as a drop in replacement for the ring hash load + // balancer. 
Maglev is not as stable as ring hash but has faster table + // lookup build times and host selection times. For more information + // about Maglev, refer to https://ai.google/research/pubs/pub44824 + // + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + // + // Possible values: + // "INVALID_LB_POLICY" + // "LEAST_REQUEST" + // "MAGLEV" + // "ORIGINAL_DESTINATION" + // "RANDOM" + // "RING_HASH" + // "ROUND_ROBIN" + LocalityLbPolicy string `json:"localityLbPolicy,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -3830,27 +4292,45 @@ type BackendService struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // OutlierDetection: Settings controlling the eviction of unhealthy + // hosts from the load balancing pool for the backend service. If not + // set, this feature is considered disabled. + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` + // Port: Deprecated in favor of portName. The TCP port to connect on the // backend. The default value is 80. // - // This cannot be used for internal load balancing. + // This cannot be used if the loadBalancingScheme is INTERNAL (Internal + // TCP/UDP Load Balancing). Port int64 `json:"port,omitempty"` - // PortName: Name of backend port. The same name should appear in the - // instance groups referenced by this service. Required when the load - // balancing scheme is EXTERNAL. + // PortName: A named port on a backend instance group representing the + // port for communication to the backend VMs in that group. Required + // when the loadBalancingScheme is EXTERNAL and the backends are + // instance groups. The named port must be defined on each backend + // instance group. This parameter has no meaning if the backends are + // NEGs. // - // When the load balancing scheme is INTERNAL, this field is not used. + // + // + // Must be omitted when the loadBalancingScheme is INTERNAL (Internal + // TCP/UDP Load Blaancing). PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with // backends. // - // Possible values are HTTP, HTTPS, TCP, and SSL. The default is - // HTTP. - // - // For internal load balancing, the possible values are TCP and UDP, and - // the default is TCP. + // Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, or UDP, depending + // on the chosen load balancer or Traffic Director configuration. Refer + // to the documentation for the load balancer or for Traffic Director + // for more information. // // Possible values: // "HTTP" @@ -3875,26 +4355,33 @@ type BackendService struct { SelfLink string `json:"selfLink,omitempty"` // SessionAffinity: Type of session affinity to use. The default is - // NONE. + // NONE. Session affinity is not applicable if the --protocol is + // UDP. 
// - // When the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, - // or GENERATED_COOKIE. + // When the loadBalancingScheme is EXTERNAL, possible values are NONE, + // CLIENT_IP, or GENERATED_COOKIE. You can use GENERATED_COOKIE if the + // protocol is HTTP or HTTPS. // - // When the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, - // CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. + // When the loadBalancingScheme is INTERNAL, possible values are NONE, + // CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. // - // When the protocol is UDP, this field is not used. + // When the loadBalancingScheme is INTERNAL_SELF_MANAGED, possible + // values are NONE, CLIENT_IP, GENERATED_COOKIE, HEADER_FIELD, or + // HTTP_COOKIE. // // Possible values: // "CLIENT_IP" // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" + // "HEADER_FIELD" + // "HTTP_COOKIE" // "NONE" SessionAffinity string `json:"sessionAffinity,omitempty"` - // TimeoutSec: How many seconds to wait for the backend before - // considering it a failed request. Default is 30 seconds. + // TimeoutSec: The backend service timeout has a different meaning + // depending on the type of load balancer. For more information read, + // Backend service settings The default is 30 seconds. TimeoutSec int64 `json:"timeoutSec,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4534,7 +5021,7 @@ type Binding struct { // account. // // * `user:{emailid}`: An email address that represents a specific - // Google account. For example, `alice@gmail.com` . + // Google account. For example, `alice@example.com` . // // // @@ -4661,17 +5148,63 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Commitment: Represents a Commitment resource. Creating a Commitment -// resource means that you are purchasing a committed use contract with -// an explicit start and end time. You can create commitments based on -// vCPUs and memory usage and receive discounted rates. For full -// details, read Signing Up for Committed Use Discounts. +// CircuitBreakers: Settings controlling the volume of connections to a +// backend service. +type CircuitBreakers struct { + // MaxConnections: The maximum number of connections to the backend + // service. If not specified, there is no limit. + MaxConnections int64 `json:"maxConnections,omitempty"` + + // MaxPendingRequests: The maximum number of pending requests allowed to + // the backend service. If not specified, there is no limit. + MaxPendingRequests int64 `json:"maxPendingRequests,omitempty"` + + // MaxRequests: The maximum number of parallel requests that allowed to + // the backend service. If not specified, there is no limit. + MaxRequests int64 `json:"maxRequests,omitempty"` + + // MaxRequestsPerConnection: Maximum requests for a single connection to + // the backend service. This parameter is respected by both the HTTP/1.1 + // and HTTP/2 implementations. If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. + MaxRequestsPerConnection int64 `json:"maxRequestsPerConnection,omitempty"` + + // MaxRetries: The maximum number of parallel retries allowed to the + // backend cluster. If not specified, the default is 1. + MaxRetries int64 `json:"maxRetries,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxConnections") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxConnections") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CircuitBreakers) MarshalJSON() ([]byte, error) { + type NoMethod CircuitBreakers + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Commitment: Represents a regional Commitment resource. // -// Committed use discounts are subject to Google Cloud Platform's -// Service Specific Terms. By purchasing a committed use discount, you -// agree to these terms. Committed use discounts will not renew, so you -// must purchase a new commitment to continue receiving discounts. (== -// resource_for beta.commitments ==) (== resource_for v1.commitments ==) +// Creating a commitment resource means that you are purchasing a +// committed use contract with an explicit start and end time. You can +// create commitments based on vCPUs and memory usage and receive +// discounted rates. For full details, read Signing Up for Committed Use +// Discounts. (== resource_for beta.regionCommitments ==) (== +// resource_for v1.regionCommitments ==) type Commitment struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -4716,6 +5249,9 @@ type Commitment struct { // used. Region string `json:"region,omitempty"` + // Reservations: List of reservations in this commitment. + Reservations []*Reservation `json:"reservations,omitempty"` + // Resources: A list of commitment amounts for particular resources. // Note that VCPU and MEMORY resource commitments must occur together. Resources []*ResourceCommitment `json:"resources,omitempty"` @@ -5282,8 +5818,9 @@ func (s *Condition) MarshalJSON() ([]byte, error) { // ConnectionDraining: Message containing connection draining // configuration. type ConnectionDraining struct { - // DrainingTimeoutSec: Time for which instance will be drained (not - // accept new connections, but still work to finish started). + // DrainingTimeoutSec: The amount of time in seconds to allow existing + // connections to persist while on unhealthy backend VMs. Only + // applicable if the protocol is not UDP. The valid range is [0, 3600]. DrainingTimeoutSec int64 `json:"drainingTimeoutSec,omitempty"` // ForceSendFields is a list of field names (e.g. "DrainingTimeoutSec") @@ -5310,6 +5847,155 @@ func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConsistentHashLoadBalancerSettings: This message defines settings for +// a consistent hash style load balancer. +type ConsistentHashLoadBalancerSettings struct { + // HttpCookie: Hash is based on HTTP Cookie. This field describes a HTTP + // cookie that will be used as the hash key for the consistent hash load + // balancer. If the cookie is not present, it will be generated. 
This + // field is applicable if the sessionAffinity is set to HTTP_COOKIE. + HttpCookie *ConsistentHashLoadBalancerSettingsHttpCookie `json:"httpCookie,omitempty"` + + // HttpHeaderName: The hash based on the value of the specified header + // field. This field is applicable if the sessionAffinity is set to + // HEADER_FIELD. + HttpHeaderName string `json:"httpHeaderName,omitempty"` + + // MinimumRingSize: The minimum number of virtual nodes to use for the + // hash ring. Defaults to 1024. Larger ring sizes result in more + // granular load distributions. If the number of hosts in the load + // balancing pool is larger than the ring size, each host will be + // assigned a single virtual node. + MinimumRingSize int64 `json:"minimumRingSize,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "HttpCookie") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HttpCookie") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConsistentHashLoadBalancerSettings) MarshalJSON() ([]byte, error) { + type NoMethod ConsistentHashLoadBalancerSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ConsistentHashLoadBalancerSettingsHttpCookie: The information about +// the HTTP Cookie on which the hash function is based for load +// balancing policies that use a consistent hash. +type ConsistentHashLoadBalancerSettingsHttpCookie struct { + // Name: Name of the cookie. + Name string `json:"name,omitempty"` + + // Path: Path to set for the cookie. + Path string `json:"path,omitempty"` + + // Ttl: Lifetime of the cookie. + Ttl *Duration `json:"ttl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ConsistentHashLoadBalancerSettingsHttpCookie) MarshalJSON() ([]byte, error) { + type NoMethod ConsistentHashLoadBalancerSettingsHttpCookie + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CorsPolicy: The specification for allowing client side cross-origin +// requests. Please see W3C Recommendation for Cross Origin Resource +// Sharing +type CorsPolicy struct { + // AllowCredentials: In response to a preflight request, setting this to + // true indicates that the actual request can include user credentials. + // This translates to the Access-Control-Allow-Credentials + // header. + // Default is false. + AllowCredentials bool `json:"allowCredentials,omitempty"` + + // AllowHeaders: Specifies the content for the + // Access-Control-Allow-Headers header. + AllowHeaders []string `json:"allowHeaders,omitempty"` + + // AllowMethods: Specifies the content for the + // Access-Control-Allow-Methods header. + AllowMethods []string `json:"allowMethods,omitempty"` + + // AllowOriginRegexes: Specifies the regualar expression patterns that + // match allowed origins. For regular expression grammar please see + // en.cppreference.com/w/cpp/regex/ecmascript + // An origin is allowed if it matches either allow_origins or + // allow_origin_regex. + AllowOriginRegexes []string `json:"allowOriginRegexes,omitempty"` + + // AllowOrigins: Specifies the list of origins that will be allowed to + // do CORS requests. + // An origin is allowed if it matches either allow_origins or + // allow_origin_regex. + AllowOrigins []string `json:"allowOrigins,omitempty"` + + // Disabled: If true, specifies the CORS policy is disabled. The default + // value of false, which indicates that the CORS policy is in effect. + Disabled bool `json:"disabled,omitempty"` + + // ExposeHeaders: Specifies the content for the + // Access-Control-Expose-Headers header. + ExposeHeaders []string `json:"exposeHeaders,omitempty"` + + // MaxAge: Specifies how long the results of a preflight request can be + // cached. This translates to the content for the Access-Control-Max-Age + // header. + MaxAge int64 `json:"maxAge,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowCredentials") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowCredentials") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *CorsPolicy) MarshalJSON() ([]byte, error) { + type NoMethod CorsPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CustomerEncryptionKey: Represents a customer-supplied encryption key type CustomerEncryptionKey struct { // KmsKeyName: The name of the encryption key that is stored in Google @@ -5444,8 +6130,20 @@ func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Disk: A Disk resource. (== resource_for beta.disks ==) (== -// resource_for v1.disks ==) +// Disk: Represents a Persistent Disk resource. +// +// Persistent disks are required for running your VM instances. Create +// both boot and non-boot (data) persistent disks. For more information, +// read Persistent Disks. For more storage options, read Storage +// options. +// +// The disks resource represents a zonal persistent disk. For more +// information, read Zonal persistent disks. +// +// The regionDisks resource represents a regional persistent disk. For +// more information, read Regional resources. (== resource_for +// beta.disks ==) (== resource_for v1.disks ==) (== resource_for +// v1.regionDisks ==) (== resource_for beta.regionDisks ==) type Disk struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -5460,8 +6158,8 @@ type Disk struct { // // After you encrypt a disk with a customer-supplied key, you must // provide the same key if you use the disk later (e.g. to create a disk - // snapshot or an image, or to attach the disk to a virtual - // machine). + // snapshot, to create a disk image, to create a machine image, or to + // attach the disk to a virtual machine). // // Customer-supplied encryption keys do not protect access to metadata // of the disk. @@ -5545,6 +6243,10 @@ type Disk struct { // to. Only applicable for regional resources. ReplicaZones []string `json:"replicaZones,omitempty"` + // ResourcePolicies: Resource policies applied to this disk for + // automatic snapshot creations. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // SelfLink: [Output Only] Server-defined fully-qualified URL for this // resource. SelfLink string `json:"selfLink,omitempty"` @@ -5625,7 +6327,10 @@ type Disk struct { // version of the snapshot that was used. SourceSnapshotId string `json:"sourceSnapshotId,omitempty"` - // Status: [Output Only] The status of disk creation. + // Status: [Output Only] The status of disk creation. CREATING: Disk is + // provisioning. RESTORING: Source data is being copied into the disk. + // FAILED: Disk creation failed. READY: Disk is ready for use. DELETING: + // Disk is deleting. // // Possible values: // "CREATING" @@ -5637,11 +6342,11 @@ type Disk struct { // Type: URL of the disk type resource describing which disk type to use // to create the disk. Provide this when creating the disk. For example: - // project/zones/zone/diskTypes/pd-standard or pd-ssd + // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd Type string `json:"type,omitempty"` // Users: [Output Only] Links to the users of the disk (attached - // instances) in form: project/zones/zone/instances/instance + // instances) in form: projects/project/zones/zone/instances/instance Users []string `json:"users,omitempty"` // Zone: [Output Only] URL of the zone where the disk resides. 
You must @@ -6095,8 +6800,19 @@ func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DiskType: A DiskType resource. (== resource_for beta.diskTypes ==) -// (== resource_for v1.diskTypes ==) +// DiskType: Represents a Disk Type resource. +// +// You can choose from a variety of disk types based on your needs. For +// more information, read Storage options. +// +// The diskTypes resource represents disk types for a zonal persistent +// disk. For more information, read Zonal persistent disks. +// +// The regionDiskTypes resource represents disk types for a regional +// persistent disk. For more information, read Regional persistent +// disks. (== resource_for beta.diskTypes ==) (== resource_for +// v1.diskTypes ==) (== resource_for v1.regionDiskTypes ==) (== +// resource_for beta.regionDiskTypes ==) type DiskType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -6614,6 +7330,63 @@ func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type DisksAddResourcePoliciesRequest struct { + // ResourcePolicies: Resource policies to be added to this disk. + // Currently you can only specify one policy here. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourcePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DisksAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod DisksAddResourcePoliciesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DisksRemoveResourcePoliciesRequest struct { + // ResourcePolicies: Resource policies to be removed from this disk. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourcePolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DisksRemoveResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + type NoMethod DisksRemoveResourcePoliciesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DisksResizeRequest struct { // SizeGb: The new size of the persistent disk, which is specified in // GB. @@ -6775,6 +7548,34 @@ func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DisplayDevice: A set of Display Device options +type DisplayDevice struct { + // EnableDisplay: Defines whether the instance has Display enabled. + EnableDisplay bool `json:"enableDisplay,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EnableDisplay") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableDisplay") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DisplayDevice) MarshalJSON() ([]byte, error) { + type NoMethod DisplayDevice + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DistributionPolicy struct { // Zones: Zones where the regional managed instance group will create // and manage instances. @@ -6831,6 +7632,45 @@ func (s *DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Duration: A Duration represents a fixed-length span of time +// represented as a count of seconds and fractions of seconds at +// nanosecond resolution. It is independent of any calendar and concepts +// like "day" or "month". Range is approximately 10,000 years. +type Duration struct { + // Nanos: Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented with a 0 + // `seconds` field and a positive `nanos` field. Must be from 0 to + // 999,999,999 inclusive. + Nanos int64 `json:"nanos,omitempty"` + + // Seconds: Span of time at a resolution of a second. Must be from 0 to + // 315,576,000,000 inclusive. Note: these bounds are computed from: 60 + // sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `json:"seconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Nanos") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nanos") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Duration) MarshalJSON() ([]byte, error) { + type NoMethod Duration + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Expr: Represents an expression text. Example: // // title: "User account presence" description: "Determines whether the @@ -6881,7 +7721,300 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Firewall: Represents a Firewall resource. +// ExternalVpnGateway: External VPN gateway is the on-premises VPN +// gateway(s) or another cloud provider?s VPN gateway that connects to +// your Google Cloud VPN gateway. To create a highly available VPN from +// Google Cloud to your on-premises side or another Cloud provider's VPN +// gateway, you must create a external VPN gateway resource in GCP, +// which provides the information to GCP about your external VPN +// gateway. +type ExternalVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Interfaces: List of interfaces for this external VPN gateway. + Interfaces []*ExternalVpnGatewayInterface `json:"interfaces,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#externalVpnGateway for externalVpnGateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // ExternalVpnGateway, which is essentially a hash of the labels set + // used for optimistic locking. The fingerprint is initially generated + // by Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // ExternalVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this ExternalVpnGateway resource. These + // can be later modified by the setLabels method. Each label key/value + // must comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // RedundancyType: Indicates the user-supplied redundancy type of this + // external VPN gateway. + // + // Possible values: + // "FOUR_IPS_REDUNDANCY" + // "SINGLE_IP_INTERNALLY_REDUNDANT" + // "TWO_IPS_REDUNDANCY" + RedundancyType string `json:"redundancyType,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayInterface: The interface for the external VPN +// gateway. +type ExternalVpnGatewayInterface struct { + // Id: The numeric ID of this interface. The allowed input values for + // this id for different redundancy types of external VPN gateway: + // SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 + // FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 + Id int64 `json:"id,omitempty"` + + // IpAddress: IP address of the interface in the external VPN gateway. + // Only IPv4 is supported. This IP address can be either from your + // on-premise gateway or another Cloud provider?s VPN gateway, it cannot + // be an IP address from Google Compute Engine. + IpAddress string `json:"ipAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayInterface) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayInterface + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayList: Response to the list request, and contains a +// list of externalVpnGateways. +type ExternalVpnGatewayList struct { + Etag string `json:"etag,omitempty"` + + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of ExternalVpnGateway resources. + Items []*ExternalVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#externalVpnGatewayList for lists of externalVpnGateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ExternalVpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayListWarning: [Output Only] Informational warning +// message. +type ExternalVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ExternalVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ExternalVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Firewall: Represents a Firewall Rule resource. +// +// Firewall rules allow or deny ingress traffic to, and egress traffic +// from your instances. For more information, read Firewall rules. type Firewall struct { // Allowed: The list of ALLOW rules specified by this firewall. Each // rule specifies a protocol and port-range tuple that describes a @@ -6898,29 +8031,30 @@ type Firewall struct { Denied []*FirewallDenied `json:"denied,omitempty"` // Description: An optional description of this resource. Provide this - // property when you create the resource. + // field when you create the resource. Description string `json:"description,omitempty"` // DestinationRanges: If destination ranges are specified, the firewall - // will apply only to traffic that has destination IP address in these + // rule applies only to traffic that has destination IP address in these // ranges. These ranges must be expressed in CIDR format. Only IPv4 is // supported. DestinationRanges []string `json:"destinationRanges,omitempty"` - // Direction: Direction of traffic to which this firewall applies; - // default is INGRESS. Note: For INGRESS traffic, it is NOT supported to - // specify destinationRanges; For EGRESS traffic, it is NOT supported to - // specify sourceRanges OR sourceTags. + // Direction: Direction of traffic to which this firewall applies, + // either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` + // traffic, you cannot specify the destinationRanges field, and for + // `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags + // fields. // // Possible values: // "EGRESS" // "INGRESS" Direction string `json:"direction,omitempty"` - // Disabled: Denotes whether the firewall rule is disabled, i.e not - // applied to the network it is associated with. When set to true, the - // firewall rule is not enforced and the network behaves as if it did - // not exist. If this is unspecified, the firewall rule will be enabled. + // Disabled: Denotes whether the firewall rule is disabled. When set to + // true, the firewall rule is not enforced and the network behaves as if + // it did not exist. If this is unspecified, the firewall rule will be + // enabled. Disabled bool `json:"disabled,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -6939,58 +8073,61 @@ type Firewall struct { // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?. 
The first + // character must be a lowercase letter, and all following characters + // (except for the last character) must be a dash, lowercase letter, or + // digit. The last character must be a lowercase letter or digit. Name string `json:"name,omitempty"` // Network: URL of the network resource for this firewall rule. If not // specified when creating a firewall rule, the default network is // used: // global/networks/default - // If you choose to specify this property, you can specify the network - // as a full or partial URL. For example, the following are all valid - // URLs: + // If you choose to specify this field, you can specify the network as a + // full or partial URL. For example, the following are all valid URLs: + // // - // https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network // - projects/myproject/global/networks/my-network // - global/networks/default Network string `json:"network,omitempty"` - // Priority: Priority for this rule. This is an integer between 0 and - // 65535, both inclusive. When not specified, the value assumed is 1000. - // Relative priorities determine precedence of conflicting rules. Lower - // value of priority implies higher precedence (eg, a rule with priority - // 0 has higher precedence than a rule with priority 1). DENY rules take - // precedence over ALLOW rules having equal priority. + // Priority: Priority for this rule. This is an integer between `0` and + // `65535`, both inclusive. The default value is `1000`. Relative + // priorities determine which rule takes effect if multiple rules apply. + // Lower values indicate higher priority. For example, a rule with + // priority `0` has higher precedence than a rule with priority `1`. + // DENY rules take precedence over ALLOW rules if they have equal + // priority. Note that VPC networks have implied rules with a priority + // of `65535`. To avoid conflicts with the implied rules, use a priority + // number less than `65535`. Priority int64 `json:"priority,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SourceRanges: If source ranges are specified, the firewall will apply - // only to traffic that has source IP address in these ranges. These - // ranges must be expressed in CIDR format. One or both of sourceRanges - // and sourceTags may be set. If both properties are set, the firewall - // will apply to traffic that has source IP address within sourceRanges - // OR the source IP that belongs to a tag listed in the sourceTags - // property. The connection does not need to match both properties for - // the firewall to apply. Only IPv4 is supported. + // SourceRanges: If source ranges are specified, the firewall rule + // applies only to traffic that has a source IP address in these ranges. + // These ranges must be expressed in CIDR format. One or both of + // sourceRanges and sourceTags may be set. If both fields are set, the + // rule applies to traffic that has a source IP address within + // sourceRanges OR a source IP from a resource with a matching tag + // listed in the sourceTags field. The connection does not need to match + // both fields for the rule to apply. Only IPv4 is supported. SourceRanges []string `json:"sourceRanges,omitempty"` // SourceServiceAccounts: If source service accounts are specified, the - // firewall will apply only to traffic originating from an instance with - // a service account in this list. 
Source service accounts cannot be - // used to control traffic to an instance's external IP address because - // service accounts are associated with an instance, not an IP address. - // sourceRanges can be set at the same time as sourceServiceAccounts. If - // both are set, the firewall will apply to traffic that has source IP - // address within sourceRanges OR the source IP belongs to an instance - // with service account listed in sourceServiceAccount. The connection - // does not need to match both properties for the firewall to apply. - // sourceServiceAccounts cannot be used at the same time as sourceTags - // or targetTags. + // firewall rules apply only to traffic originating from an instance + // with a service account in this list. Source service accounts cannot + // be used to control traffic to an instance's external IP address + // because service accounts are associated with an instance, not an IP + // address. sourceRanges can be set at the same time as + // sourceServiceAccounts. If both are set, the firewall applies to + // traffic that has a source IP address within the sourceRanges OR a + // source IP that belongs to an instance with service account listed in + // sourceServiceAccount. The connection does not need to match both + // fields for the firewall to apply. sourceServiceAccounts cannot be + // used at the same time as sourceTags or targetTags. SourceServiceAccounts []string `json:"sourceServiceAccounts,omitempty"` // SourceTags: If source tags are specified, the firewall rule applies @@ -7000,11 +8137,11 @@ type Firewall struct { // instance's external IP address, it only applies to traffic between // instances in the same virtual network. Because tags are associated // with instances, not IP addresses. One or both of sourceRanges and - // sourceTags may be set. If both properties are set, the firewall will - // apply to traffic that has source IP address within sourceRanges OR - // the source IP that belongs to a tag listed in the sourceTags - // property. The connection does not need to match both properties for - // the firewall to apply. + // sourceTags may be set. If both fields are set, the firewall applies + // to traffic that has a source IP address within sourceRanges OR a + // source IP from a resource with a matching tag listed in the + // sourceTags field. The connection does not need to match both fields + // for the firewall to apply. SourceTags []string `json:"sourceTags,omitempty"` // TargetServiceAccounts: A list of service accounts indicating sets of @@ -7053,13 +8190,13 @@ type FirewallAllowed struct { // IPProtocol: The IP protocol to which this rule applies. The protocol // type is required when creating a firewall rule. This value can either // be one of the following well known protocol strings (tcp, udp, icmp, - // esp, ah, ipip, sctp), or the IP protocol number. + // esp, ah, ipip, sctp) or the IP protocol number. IPProtocol string `json:"IPProtocol,omitempty"` // Ports: An optional list of ports to which this rule applies. This - // field is only applicable for UDP or TCP protocol. Each entry must be - // either an integer or a range. If not specified, this rule applies to - // connections through any port. + // field is only applicable for the UDP or TCP protocol. Each entry must + // be either an integer or a range. If not specified, this rule applies + // to connections through any port. // // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. 
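// A minimal, illustrative sketch (not taken from this patch) of a Firewall
// literal using only the fields documented above: an INGRESS rule that allows
// tcp:22 and tcp:80 from 10.0.0.0/8 at the default priority of 1000. The
// import path is assumed to be google.golang.org/api/compute/v1, the package
// this generated file belongs to; the rule name, network, and CIDR are made up.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	fw := &compute.Firewall{
		Name:         "allow-ssh-http",
		Network:      "global/networks/default",
		Direction:    "INGRESS",
		Priority:     1000, // lower values take precedence over higher ones
		SourceRanges: []string{"10.0.0.0/8"},
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"22", "80"}},
		},
	}
	// The generated MarshalJSON is used by encoding/json, so empty fields are
	// omitted unless listed in ForceSendFields.
	b, _ := json.MarshalIndent(fw, "", "  ")
	fmt.Println(string(b))
}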
Ports []string `json:"ports,omitempty"` @@ -7091,13 +8228,13 @@ type FirewallDenied struct { // IPProtocol: The IP protocol to which this rule applies. The protocol // type is required when creating a firewall rule. This value can either // be one of the following well known protocol strings (tcp, udp, icmp, - // esp, ah, ipip, sctp), or the IP protocol number. + // esp, ah, ipip, sctp) or the IP protocol number. IPProtocol string `json:"IPProtocol,omitempty"` // Ports: An optional list of ports to which this rule applies. This - // field is only applicable for UDP or TCP protocol. Each entry must be - // either an integer or a range. If not specified, this rule applies to - // connections through any port. + // field is only applicable for the UDP or TCP protocol. Each entry must + // be either an integer or a range. If not specified, this rule applies + // to connections through any port. // // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. Ports []string `json:"ports,omitempty"` @@ -7355,59 +8492,50 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ForwardingRule: A ForwardingRule resource. A ForwardingRule resource -// specifies which pool of target virtual machines to forward a packet -// to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== -// resource_for beta.forwardingRules ==) (== resource_for +// ForwardingRule: Represents a Forwarding Rule resource. +// +// A forwarding rule and its corresponding IP address represent the +// frontend configuration of a Google Cloud Platform load balancer. +// Forwarding rules can also reference target instances and Cloud VPN +// Classic gateways (targetVpnGateway). +// +// For more information, read Forwarding rule concepts and Using +// protocol forwarding. +// +// (== resource_for beta.forwardingRules ==) (== resource_for // v1.forwardingRules ==) (== resource_for beta.globalForwardingRules // ==) (== resource_for v1.globalForwardingRules ==) (== resource_for // beta.regionForwardingRules ==) (== resource_for // v1.regionForwardingRules ==) type ForwardingRule struct { - // IPAddress: The IP address that this forwarding rule is serving on - // behalf of. + // IPAddress: IP address that this forwarding rule serves. When a client + // sends traffic to this IP address, the forwarding rule directs the + // traffic to the target that you specify in the forwarding rule. // - // Addresses are restricted based on the forwarding rule's load - // balancing scheme (EXTERNAL or INTERNAL) and scope (global or - // regional). + // If you don't specify a reserved IP address, an ephemeral IP address + // is assigned. Methods for specifying an IP address: // - // When the load balancing scheme is EXTERNAL, for global forwarding - // rules, the address must be a global IP, and for regional forwarding - // rules, the address must live in the same region as the forwarding - // rule. If this field is empty, an ephemeral IPv4 address from the same - // scope (global or regional) will be assigned. A regional forwarding - // rule supports IPv4 only. A global forwarding rule supports either - // IPv4 or IPv6. 
+ // * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in + // https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name // - // When the load balancing scheme is INTERNAL_SELF_MANAGED, this must be - // a URL reference to an existing Address resource ( internal regional - // static IP address), with a purpose of GCE_END_POINT and address_type - // of INTERNAL. - // - // When the load balancing scheme is INTERNAL, this can only be an RFC - // 1918 IP address belonging to the network/subnet configured for the - // forwarding rule. By default, if this field is empty, an ephemeral - // internal IP address will be automatically allocated from the IP range - // of the subnet or network configured for this forwarding rule. - // - // An address can be specified either by a literal IP address or a URL - // reference to an existing Address resource. The following examples are - // all valid: - // - 100.1.2.3 - // - - // https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address - // - projects/project/regions/region/addresses/address - // - regions/region/addresses/address - // - global/addresses/address - // - address + // The loadBalancingScheme and the forwarding rule's target determine + // the type of IP address that you can use. For detailed information, + // refer to [IP address + // specifications](/load-balancing/docs/forwarding-rule-concepts#ip_addre + // ss_specifications). IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. Valid options // are TCP, UDP, ESP, AH, SCTP or ICMP. // - // When the load balancing scheme is INTERNAL, only TCP and UDP are - // valid. When the load balancing scheme is INTERNAL_SELF_MANAGED, only - // TCPis valid. + // For Internal TCP/UDP Load Balancing, the load balancing scheme is + // INTERNAL, and one of TCP or UDP are valid. For Traffic Director, the + // load balancing scheme is INTERNAL_SELF_MANAGED, and only TCPis valid. + // For Internal HTTP(S) Load Balancing, the load balancing scheme is + // INTERNAL_MANAGED, and only TCP is valid. For HTTP(S), SSL Proxy, and + // TCP Proxy Load Balancing, the load balancing scheme is EXTERNAL and + // only TCP is valid. For Network TCP/UDP Load Balancing, the load + // balancing scheme is EXTERNAL, and one of TCP or UDP is valid. // // Possible values: // "AH" @@ -7461,21 +8589,47 @@ type ForwardingRule struct { // compute#forwardingRule for Forwarding Rule resources. Kind string `json:"kind,omitempty"` - // LoadBalancingScheme: This signifies what the ForwardingRule will be - // used for and can only take the following values: INTERNAL, - // INTERNAL_SELF_MANAGED, EXTERNAL. The value of INTERNAL means that - // this will be used for Internal Network Load Balancing (TCP, UDP). The - // value of INTERNAL_SELF_MANAGED means that this will be used for - // Internal Global HTTP(S) LB. The value of EXTERNAL means that this - // will be used for External Load Balancing (HTTP(S) LB, External - // TCP/UDP LB, SSL Proxy) + // LoadBalancingScheme: Specifies the forwarding rule type. EXTERNAL is + // used for: - Classic Cloud VPN gateways - Protocol forwarding to VMs + // from an external IP address - The following load balancers: HTTP(S), + // SSL Proxy, TCP Proxy, and Network TCP/UDP. 
+ // + // INTERNAL is used for: - Protocol forwarding to VMs from an internal + // IP address - Internal TCP/UDP load balancers + // + // INTERNAL_MANAGED is used for: - Internal HTTP(S) load + // balancers + // + // INTERNAL_SELF_MANAGED is used for: - Traffic Director + // + // For more information about forwarding rules, refer to Forwarding rule + // concepts. // // Possible values: // "EXTERNAL" // "INTERNAL" + // "INTERNAL_MANAGED" + // "INTERNAL_SELF_MANAGED" // "INVALID" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // MetadataFilters: Opaque filter criteria used by Loadbalancer to + // restrict routing configuration to a limited set xDS compliant + // clients. In their xDS requests to Loadbalancer, xDS clients present + // node metadata. If a match takes place, the relevant routing + // configuration is made available to those proxies. + // For each metadataFilter in this list, if its filterMatchCriteria is + // set to MATCH_ANY, at least one of the filterLabels must match the + // corresponding label provided in the metadata. If its + // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels + // must match with corresponding labels in the provided + // metadata. + // metadataFilters specified here can be overridden by those specified + // in the UrlMap that this ForwardingRule references. + // metadataFilters only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -7509,17 +8663,24 @@ type ForwardingRule struct { // "STANDARD" NetworkTier string `json:"networkTier,omitempty"` - // PortRange: This field is used along with the target field for - // TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, - // TargetVpnGateway, TargetPool, TargetInstance. + // PortRange: This field is deprecated. See the port + // field. + PortRange string `json:"portRange,omitempty"` + + // Ports: List of comma-separated ports. The forwarding rule forwards + // packets with matching destination ports. If the forwarding rule's + // loadBalancingScheme is EXTERNAL, and the forwarding rule references a + // target pool, specifying ports is optional. You can specify an + // unlimited number of ports, but they must be contiguous. If you omit + // ports, GCP forwards traffic on any port of the forwarding rule's + // protocol. + // + // If the forwarding rule's loadBalancingScheme is EXTERNAL, and the + // forwarding rule references a target HTTP proxy, target HTTPS proxy, + // target TCP proxy, target SSL proxy, or target VPN gateway, you must + // specify ports using the following constraints: // - // Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets - // addressed to ports in the specified range will be forwarded to - // target. Forwarding rules with the same [IPAddress, IPProtocol] pair - // must have disjoint port ranges. 
// - // Some types of forwarding target have constraints on the acceptable - // ports: // - TargetHttpProxy: 80, 8080 // - TargetHttpsProxy: 443 // - TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, @@ -7527,17 +8688,21 @@ type ForwardingRule struct { // - TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, // 995, 1688, 1883, 5222 // - TargetVpnGateway: 500, 4500 - PortRange string `json:"portRange,omitempty"` - - // Ports: This field is used along with the backend_service field for - // internal load balancing. // - // When the load balancing scheme is INTERNAL, a list of ports can be - // configured, for example, ['80'], ['8000','9000'] etc. Only packets - // addressed to these ports will be forwarded to the backends configured - // with this forwarding rule. + // If the forwarding rule's loadBalancingScheme is INTERNAL, you must + // specify ports in one of the following ways: // - // You may specify a maximum of up to 5 ports. + // * A list of up to five ports, which can be non-contiguous * Keyword + // ALL, which causes the forwarding rule to forward traffic on any port + // of the forwarding rule's protocol. + // + // The ports field is used along with the target field for + // TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, + // TargetVpnGateway, TargetPool, TargetInstance. + // + // Applicable only when IPProtocol is TCP, UDP, or SCTP. Forwarding + // rules with the same [IPAddress, IPProtocol] pair must have disjoint + // port ranges. Ports []string `json:"ports,omitempty"` // Region: [Output Only] URL of the region where the regional forwarding @@ -7550,8 +8715,8 @@ type ForwardingRule struct { SelfLink string `json:"selfLink,omitempty"` // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, will be the first label of the fully - // qualified service name. + // Forwarding Rule. If specified, the prefix is the first label of the + // fully qualified service name. // // The label must be 1-63 characters long, and comply with RFC1035. // Specifically, the label must be 1-63 characters long and match the @@ -8169,6 +9334,117 @@ func (s *GlobalSetPolicyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GuestAttributes: A guest attributes entry. +type GuestAttributes struct { + // Kind: [Output Only] Type of the resource. Always + // compute#guestAttributes for guest attributes entry. + Kind string `json:"kind,omitempty"` + + // QueryPath: The path to be queried. This can be the default namespace + // ('/') or a nested namespace ('//') or a specified key ('//') + QueryPath string `json:"queryPath,omitempty"` + + // QueryValue: [Output Only] The value of the requested queried path. + QueryValue *GuestAttributesValue `json:"queryValue,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // VariableKey: The key to search for. + VariableKey string `json:"variableKey,omitempty"` + + // VariableValue: [Output Only] The value found for the requested key. + VariableValue string `json:"variableValue,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
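// A minimal sketch of ForwardingRule literals that follow the port rules
// described above, assuming the google.golang.org/api/compute/v1 import path.
// The target and backend-service fields needed to make these rules complete
// are defined elsewhere in this file and are omitted here; the names and
// ports are illustrative only.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// INTERNAL scheme: a list of up to five ports, which may be non-contiguous.
	internal := &compute.ForwardingRule{
		Name:                "fr-internal-tcp",
		LoadBalancingScheme: "INTERNAL",
		IPProtocol:          "TCP",
		Ports:               []string{"80", "443", "8080"},
	}

	// EXTERNAL scheme referencing a target HTTP proxy: only the listed
	// constraint ports (80, 8080) are accepted.
	external := &compute.ForwardingRule{
		Name:                "fr-external-http",
		LoadBalancingScheme: "EXTERNAL",
		IPProtocol:          "TCP",
		Ports:               []string{"80"},
	}
	fmt.Println(internal.Name, external.Name)
}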
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GuestAttributes) MarshalJSON() ([]byte, error) { + type NoMethod GuestAttributes + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GuestAttributesEntry: A guest attributes namespace/key/value entry. +type GuestAttributesEntry struct { + // Key: Key for the guest attribute entry. + Key string `json:"key,omitempty"` + + // Namespace: Namespace for the guest attribute entry. + Namespace string `json:"namespace,omitempty"` + + // Value: Value for the guest attribute entry. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GuestAttributesEntry) MarshalJSON() ([]byte, error) { + type NoMethod GuestAttributesEntry + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GuestAttributesValue: Array of guest attribute namespace/key/value +// tuples. +type GuestAttributesValue struct { + Items []*GuestAttributesEntry `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
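// A minimal sketch of how ForceSendFields and NullFields change the JSON
// produced by the generated MarshalJSON methods, using GuestAttributesEntry
// from above. The import path is assumed to be google.golang.org/api/compute/v1;
// the namespace and key values are made up.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	e := &compute.GuestAttributesEntry{Namespace: "guestInventory", Key: "Hostname"}

	b, _ := e.MarshalJSON()
	fmt.Println(string(b)) // empty Value omitted, roughly: {"key":"Hostname","namespace":"guestInventory"}

	e.ForceSendFields = []string{"Value"}
	b, _ = e.MarshalJSON()
	fmt.Println(string(b)) // Value sent even though empty, roughly: ...,"value":""

	e.ForceSendFields = nil
	e.NullFields = []string{"Value"}
	b, _ = e.MarshalJSON()
	fmt.Println(string(b)) // Value sent as JSON null, roughly: ...,"value":null
}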
+ NullFields []string `json:"-"` +} + +func (s *GuestAttributesValue) MarshalJSON() ([]byte, error) { + type NoMethod GuestAttributesValue + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GuestOsFeature: Guest OS features. type GuestOsFeature struct { // Type: The ID of a supported feature. Read Enabling guest operating @@ -8452,9 +9728,14 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HealthCheck: An HealthCheck resource. This resource defines a -// template for how individual virtual machines should be checked for -// health, via one of the supported protocols. +// HealthCheck: Represents a Health Check resource. +// +// Health checks are used for most GCP load balancers and managed +// instance group auto-healing. For more information, read Health Check +// Concepts. +// +// To perform health checks on network load balancers, you must use +// either httpHealthChecks or httpsHealthChecks. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -8494,6 +9775,10 @@ type HealthCheck struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // Region: [Output Only] Region where the health check resides. Not + // applicable to global health checks. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -8739,6 +10024,293 @@ func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type HealthChecksAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of HealthChecksScopedList resources. + Items map[string]HealthChecksScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *HealthChecksAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthChecksAggregatedListWarning: [Output Only] Informational +// warning message. +type HealthChecksAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthChecksAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthChecksAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthChecksScopedList struct { + // HealthChecks: A list of HealthChecks contained in this scope. + HealthChecks []*HealthCheck `json:"healthChecks,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *HealthChecksScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksScopedList) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthChecksScopedListWarning: Informational warning which replaces +// the list of backend services when the list is empty. +type HealthChecksScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthChecksScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthChecksScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type HealthStatus struct { // HealthState: Health state of the instance. // @@ -8866,9 +10438,318 @@ func (s *HostRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpHealthCheck: An HttpHealthCheck resource. This resource defines a -// template for how individual instances should be checked for health, -// via HTTP. +// HttpFaultAbort: Specification for how requests are aborted as part of +// fault injection. +type HttpFaultAbort struct { + // HttpStatus: The HTTP status code used to abort the request. + // The value must be between 200 and 599 inclusive. + HttpStatus int64 `json:"httpStatus,omitempty"` + + // Percentage: The percentage of traffic + // (connections/operations/requests) which will be aborted as part of + // fault injection. + // The value must be between 0.0 and 100.0 inclusive. + Percentage float64 `json:"percentage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HttpStatus") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HttpStatus") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpFaultAbort) MarshalJSON() ([]byte, error) { + type NoMethod HttpFaultAbort + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *HttpFaultAbort) UnmarshalJSON(data []byte) error { + type NoMethod HttpFaultAbort + var s1 struct { + Percentage gensupport.JSONFloat64 `json:"percentage"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Percentage = float64(s1.Percentage) + return nil +} + +// HttpFaultDelay: Specifies the delay introduced by Loadbalancer before +// forwarding the request to the backend service as part of fault +// injection. +type HttpFaultDelay struct { + // FixedDelay: Specifies the value of the fixed delay interval. + FixedDelay *Duration `json:"fixedDelay,omitempty"` + + // Percentage: The percentage of traffic + // (connections/operations/requests) on which delay will be introduced + // as part of fault injection. + // The value must be between 0.0 and 100.0 inclusive. 
+ Percentage float64 `json:"percentage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FixedDelay") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FixedDelay") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpFaultDelay) MarshalJSON() ([]byte, error) { + type NoMethod HttpFaultDelay + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *HttpFaultDelay) UnmarshalJSON(data []byte) error { + type NoMethod HttpFaultDelay + var s1 struct { + Percentage gensupport.JSONFloat64 `json:"percentage"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Percentage = float64(s1.Percentage) + return nil +} + +// HttpFaultInjection: The specification for fault injection introduced +// into traffic to test the resiliency of clients to backend service +// failure. As part of fault injection, when clients send requests to a +// backend service, delays can be introduced by Loadbalancer on a +// percentage of requests before sending those request to the backend +// service. Similarly requests from clients can be aborted by the +// Loadbalancer for a percentage of requests. +type HttpFaultInjection struct { + // Abort: The specification for how client requests are aborted as part + // of fault injection. + Abort *HttpFaultAbort `json:"abort,omitempty"` + + // Delay: The specification for how client requests are delayed as part + // of fault injection, before being sent to a backend service. + Delay *HttpFaultDelay `json:"delay,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Abort") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Abort") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
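// A minimal sketch of the fault-injection types documented above, assuming
// the google.golang.org/api/compute/v1 import path. The Duration type is not
// shown in this hunk; it is assumed to expose a Seconds field as it does
// elsewhere in this package. The status code and percentages are illustrative.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	fault := &compute.HttpFaultInjection{
		// Abort 2% of requests with HTTP 503.
		Abort: &compute.HttpFaultAbort{HttpStatus: 503, Percentage: 2.0},
		// Delay 10% of requests by a fixed 1 second before forwarding.
		Delay: &compute.HttpFaultDelay{
			FixedDelay: &compute.Duration{Seconds: 1},
			Percentage: 10.0,
		},
	}
	b, _ := fault.MarshalJSON()
	fmt.Println(string(b))
}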
+ NullFields []string `json:"-"` +} + +func (s *HttpFaultInjection) MarshalJSON() ([]byte, error) { + type NoMethod HttpFaultInjection + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHeaderAction: The request and response header transformations +// that take effect before the request is passed along to the selected +// backendService. +type HttpHeaderAction struct { + // RequestHeadersToAdd: Headers to add to a matching request prior to + // forwarding the request to the backendService. + RequestHeadersToAdd []*HttpHeaderOption `json:"requestHeadersToAdd,omitempty"` + + // RequestHeadersToRemove: A list of header names for headers that need + // to be removed from the request prior to forwarding the request to the + // backendService. + RequestHeadersToRemove []string `json:"requestHeadersToRemove,omitempty"` + + // ResponseHeadersToAdd: Headers to add the response prior to sending + // the response back to the client. + ResponseHeadersToAdd []*HttpHeaderOption `json:"responseHeadersToAdd,omitempty"` + + // ResponseHeadersToRemove: A list of header names for headers that need + // to be removed from the response prior to sending the response back to + // the client. + ResponseHeadersToRemove []string `json:"responseHeadersToRemove,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RequestHeadersToAdd") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequestHeadersToAdd") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *HttpHeaderAction) MarshalJSON() ([]byte, error) { + type NoMethod HttpHeaderAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHeaderMatch: matchRule criteria for request header matches. +type HttpHeaderMatch struct { + // ExactMatch: The value should exactly match contents of + // exactMatch. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + ExactMatch string `json:"exactMatch,omitempty"` + + // HeaderName: The name of the HTTP header to match. + // For matching against the HTTP request's authority, use a headerMatch + // with the header name ":authority". + // For matching a request's method, use the headerName ":method". + HeaderName string `json:"headerName,omitempty"` + + // InvertMatch: If set to false, the headerMatch is considered a match + // if the match criteria above are met. If set to true, the headerMatch + // is considered a match if the match criteria above are NOT met. + // The default setting is false. + InvertMatch bool `json:"invertMatch,omitempty"` + + // PrefixMatch: The value of the header must start with the contents of + // prefixMatch. 
+ // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + PrefixMatch string `json:"prefixMatch,omitempty"` + + // PresentMatch: A header with the contents of headerName must exist. + // The match takes place whether or not the request's header has a value + // or not. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + PresentMatch bool `json:"presentMatch,omitempty"` + + // RangeMatch: The header value must be an integer and its value must be + // in the range specified in rangeMatch. If the header does not contain + // an integer, number or is empty, the match fails. + // For example for a range [-5, 0] + // - -3 will match. + // - 0 will not match. + // - 0.25 will not match. + // - -3someString will not match. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + RangeMatch *Int64RangeMatch `json:"rangeMatch,omitempty"` + + // RegexMatch: The value of the header must match the regualar + // expression specified in regexMatch. For regular expression grammar, + // please see: en.cppreference.com/w/cpp/regex/ecmascript + // For matching against a port specified in the HTTP request, use a + // headerMatch with headerName set to PORT and a regular expression that + // satisfies the RFC2616 Host header's port specifier. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + RegexMatch string `json:"regexMatch,omitempty"` + + // SuffixMatch: The value of the header must end with the contents of + // suffixMatch. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + SuffixMatch string `json:"suffixMatch,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExactMatch") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExactMatch") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHeaderMatch) MarshalJSON() ([]byte, error) { + type NoMethod HttpHeaderMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHeaderOption: Specification determining how headers are added to +// requests or responses. +type HttpHeaderOption struct { + // HeaderName: The name of the header. + HeaderName string `json:"headerName,omitempty"` + + // HeaderValue: The value of the header to add. + HeaderValue string `json:"headerValue,omitempty"` + + // Replace: If false, headerValue is appended to any values that already + // exist for the header. If true, headerValue is set for the header, + // discarding any values that were set for that header. + // The default value is false. 
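// A minimal sketch of the header-matching and header-transformation types
// documented above, assuming the google.golang.org/api/compute/v1 import
// path. Each HttpHeaderMatch sets exactly one match criterion, as the field
// comments require; the header names and values are made up.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Match GET requests whose x-canary header is absent.
	matches := []*compute.HttpHeaderMatch{
		{HeaderName: ":method", ExactMatch: "GET"},
		{HeaderName: "x-canary", PresentMatch: true, InvertMatch: true},
	}

	// Overwrite x-env on matching requests and drop Set-Cookie from responses.
	action := &compute.HttpHeaderAction{
		RequestHeadersToAdd: []*compute.HttpHeaderOption{
			{HeaderName: "x-env", HeaderValue: "prod", Replace: true},
		},
		ResponseHeadersToRemove: []string{"set-cookie"},
	}
	fmt.Println(len(matches), action.RequestHeadersToAdd[0].HeaderName)
}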
+ Replace bool `json:"replace,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HeaderName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHeaderOption) MarshalJSON() ([]byte, error) { + type NoMethod HttpHeaderOption + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHealthCheck: Represents a legacy HTTP Health Check +// resource. +// +// Legacy health checks are required by network load balancers. For more +// information, read Health Check Concepts. type HttpHealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -8913,7 +10794,7 @@ type HttpHealthCheck struct { Port int64 `json:"port,omitempty"` // RequestPath: The request path of the HTTP health check request. The - // default value is /. + // default value is /. This field does not support query parameters. RequestPath string `json:"requestPath,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -9112,9 +10993,438 @@ func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines -// a template for how individual instances should be checked for health, -// via HTTPS. +// HttpQueryParameterMatch: HttpRouteRuleMatch criteria for a request's +// query parameter. +type HttpQueryParameterMatch struct { + // ExactMatch: The queryParameterMatch matches if the value of the + // parameter exactly matches the contents of exactMatch. + // Only one of presentMatch, exactMatch and regexMatch must be set. + ExactMatch string `json:"exactMatch,omitempty"` + + // Name: The name of the query parameter to match. The query parameter + // must exist in the request, in the absence of which the request match + // fails. + Name string `json:"name,omitempty"` + + // PresentMatch: Specifies that the queryParameterMatch matches if the + // request contains the query parameter, irrespective of whether the + // parameter has a value or not. + // Only one of presentMatch, exactMatch and regexMatch must be set. + PresentMatch bool `json:"presentMatch,omitempty"` + + // RegexMatch: The queryParameterMatch matches if the value of the + // parameter matches the regular expression specified by regexMatch. For + // the regular expression grammar, please see + // en.cppreference.com/w/cpp/regex/ecmascript + // Only one of presentMatch, exactMatch and regexMatch must be set. + RegexMatch string `json:"regexMatch,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExactMatch") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExactMatch") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpQueryParameterMatch) MarshalJSON() ([]byte, error) { + type NoMethod HttpQueryParameterMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpRedirectAction: Specifies settings for an HTTP redirect. +type HttpRedirectAction struct { + // HostRedirect: The host that will be used in the redirect response + // instead of the one that was supplied in the request. + // The value must be between 1 and 255 characters. + HostRedirect string `json:"hostRedirect,omitempty"` + + // HttpsRedirect: If set to true, the URL scheme in the redirected + // request is set to https. If set to false, the URL scheme of the + // redirected request will remain the same as that of the request. + // This must only be set for UrlMaps used in TargetHttpProxys. Setting + // this true for TargetHttpsProxy is not permitted. + // The default is set to false. + HttpsRedirect bool `json:"httpsRedirect,omitempty"` + + // PathRedirect: The path that will be used in the redirect response + // instead of the one that was supplied in the request. + // Only one of pathRedirect or prefixRedirect must be specified. + // The value must be between 1 and 1024 characters. + PathRedirect string `json:"pathRedirect,omitempty"` + + // PrefixRedirect: The prefix that replaces the prefixMatch specified in + // the HttpRouteRuleMatch, retaining the remaining portion of the URL + // before redirecting the request. + PrefixRedirect string `json:"prefixRedirect,omitempty"` + + // RedirectResponseCode: The HTTP Status code to use for this + // RedirectAction. + // Supported values are: + // - MOVED_PERMANENTLY_DEFAULT, which is the default value and + // corresponds to 301. + // - FOUND, which corresponds to 302. + // - SEE_OTHER which corresponds to 303. + // - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + // request method will be retained. + // - PERMANENT_REDIRECT, which corresponds to 308. In this case, the + // request method will be retained. + // + // Possible values: + // "FOUND" + // "MOVED_PERMANENTLY_DEFAULT" + // "PERMANENT_REDIRECT" + // "SEE_OTHER" + // "TEMPORARY_REDIRECT" + RedirectResponseCode string `json:"redirectResponseCode,omitempty"` + + // StripQuery: If set to true, any accompanying query portion of the + // original URL is removed prior to redirecting the request. If set to + // false, the query portion of the original URL is retained. + // The default is set to false. + StripQuery bool `json:"stripQuery,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HostRedirect") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
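// A minimal sketch of an HttpRedirectAction using the fields documented
// above, assuming the google.golang.org/api/compute/v1 import path: a
// permanent redirect to HTTPS that keeps the original host, path, and query.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	redirect := &compute.HttpRedirectAction{
		HttpsRedirect:        true,
		RedirectResponseCode: "MOVED_PERMANENTLY_DEFAULT", // corresponds to 301
		StripQuery:           false,                       // keep the query string
	}
	b, _ := redirect.MarshalJSON()
	fmt.Println(string(b))
}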
However, any non-pointer,
 + // non-interface field appearing in ForceSendFields will be sent to the
 + // server regardless of whether the field is empty or not. This may be
 + // used to include empty fields in Patch requests.
 + ForceSendFields []string `json:"-"`
 +
 + // NullFields is a list of field names (e.g. "HostRedirect") to include
 + // in API requests with the JSON null value. By default, fields with
 + // empty values are omitted from API requests. However, any field with
 + // an empty value appearing in NullFields will be sent to the server as
 + // null. It is an error if a field in this list has a non-empty value.
 + // This may be used to include null fields in Patch requests.
 + NullFields []string `json:"-"`
 +}
 +
 +func (s *HttpRedirectAction) MarshalJSON() ([]byte, error) {
 + type NoMethod HttpRedirectAction
 + raw := NoMethod(*s)
 + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 +}
 +
 +// HttpRetryPolicy: The retry policy associated with HttpRouteRule
 +type HttpRetryPolicy struct {
 + // NumRetries: Specifies the allowed number of retries. This number must be
 + // > 0. If not specified, defaults to 1.
 + NumRetries int64 `json:"numRetries,omitempty"`
 +
 + // PerTryTimeout: Specifies a non-zero timeout per retry attempt.
 + // If not specified, will use the timeout set in HttpRouteAction. If
 + // timeout in HttpRouteAction is not set, will use the largest timeout
 + // among all backend services associated with the route.
 + PerTryTimeout *Duration `json:"perTryTimeout,omitempty"`
 +
 + // RetryConditions: Specifies one or more conditions when this retry rule
 + // applies. Valid values are:
 + // - 5xx: Loadbalancer will attempt a retry if the backend service
 + // responds with any 5xx response code, or if the backend service does
 + // not respond at all, example: disconnects, reset, read timeout,
 + // connection failure, and refused streams.
 + // - gateway-error: Similar to 5xx, but only applies to response codes
 + // 502, 503 or 504.
 + // - connect-failure: Loadbalancer will retry on failures connecting to
 + // backend services, for example due to connection timeouts.
 + // - retriable-4xx: Loadbalancer will retry for retriable 4xx response
 + // codes. Currently the only retriable error supported is 409.
 + // - refused-stream: Loadbalancer will retry if the backend service
 + // resets the stream with a REFUSED_STREAM error code. This reset type
 + // indicates that it is safe to retry.
 + // - cancelled: Loadbalancer will retry if the gRPC status code in the
 + // response header is set to cancelled
 + // - deadline-exceeded: Loadbalancer will retry if the gRPC status code
 + // in the response header is set to deadline-exceeded
 + // - resource-exhausted: Loadbalancer will retry if the gRPC status code
 + // in the response header is set to resource-exhausted
 + // - unavailable: Loadbalancer will retry if the gRPC status code in the
 + // response header is set to unavailable
 + RetryConditions []string `json:"retryConditions,omitempty"`
 +
 + // ForceSendFields is a list of field names (e.g. "NumRetries") to
 + // unconditionally include in API requests. By default, fields with
 + // empty values are omitted from API requests. However, any non-pointer,
 + // non-interface field appearing in ForceSendFields will be sent to the
 + // server regardless of whether the field is empty or not. This may be
 + // used to include empty fields in Patch requests.
 + ForceSendFields []string `json:"-"`
 +
 + // NullFields is a list of field names (e.g. 
"NumRetries") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRetryPolicy) MarshalJSON() ([]byte, error) { + type NoMethod HttpRetryPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpRouteAction struct { + // CorsPolicy: The specification for allowing client side cross-origin + // requests. Please see W3C Recommendation for Cross Origin Resource + // Sharing + CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"` + + // FaultInjectionPolicy: The specification for fault injection + // introduced into traffic to test the resiliency of clients to backend + // service failure. As part of fault injection, when clients send + // requests to a backend service, delays can be introduced by + // Loadbalancer on a percentage of requests before sending those request + // to the backend service. Similarly requests from clients can be + // aborted by the Loadbalancer for a percentage of requests. + // timeout and retry_policy will be ignored by clients that are + // configured with a fault_injection_policy. + FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` + + // RequestMirrorPolicy: Specifies the policy on how requests intended + // for the route's backends are shadowed to a separate mirrored backend + // service. Loadbalancer does not wait for responses from the shadow + // service. Prior to sending traffic to the shadow service, the host / + // authority header is suffixed with -shadow. + RequestMirrorPolicy *RequestMirrorPolicy `json:"requestMirrorPolicy,omitempty"` + + // RetryPolicy: Specifies the retry policy associated with this route. + RetryPolicy *HttpRetryPolicy `json:"retryPolicy,omitempty"` + + // Timeout: Specifies the timeout for the selected route. Timeout is + // computed from the time the request has been fully processed (i.e. + // end-of-stream) up until the response has been completely processed. + // Timeout includes all retries. + // If not specified, will use the largest timeout among all backend + // services associated with the route. + Timeout *Duration `json:"timeout,omitempty"` + + // UrlRewrite: The spec to modify the URL of the request, prior to + // forwarding the request to the matched service + UrlRewrite *UrlRewrite `json:"urlRewrite,omitempty"` + + // WeightedBackendServices: A list of weighted backend services to send + // traffic to when a route match occurs. The weights determine the + // fraction of traffic that flows to their corresponding backend + // service. If all traffic needs to go to a single backend service, + // there must be one weightedBackendService with weight set to a non 0 + // number. + // Once a backendService is identified and before forwarding the request + // to the backend service, advanced routing actions like Url rewrites + // and header transformations are applied depending on additional + // settings specified in this HttpRouteAction. + WeightedBackendServices []*WeightedBackendService `json:"weightedBackendServices,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CorsPolicy") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CorsPolicy") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteAction) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpRouteRule: An HttpRouteRule specifies how to match an HTTP +// request and the corresponding routing action that load balancing +// proxies will perform. +type HttpRouteRule struct { + // Description: The short description conveying the intent of this + // routeRule. + // The description can have a maximum length of 1024 characters. + Description string `json:"description,omitempty"` + + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. + // The headerAction specified here are applied before the matching + // pathMatchers[].headerAction and after + // pathMatchers[].routeRules[].routeAction.weightedBackendService.backend + // ServiceWeightAction[].headerAction + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + MatchRules []*HttpRouteRuleMatch `json:"matchRules,omitempty"` + + // Priority: For routeRules within a given pathMatcher, priority + // determines the order in which load balancer will interpret + // routeRules. RouteRules are evaluated in order of priority, from the + // lowest to highest number. The priority of a rule decreases as its + // number increases (1, 2, 3, N+1). The first rule that matches the + // request is applied. + // You cannot configure two or more routeRules with the same priority. + // Priority for each rule must be set to a number between 0 and + // 2147483647 inclusive. + // Priority numbers can have gaps, which enable you to add or remove + // rules in the future without affecting the rest of the rules. For + // example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority + // numbers to which you could add rules numbered from 6 to 8, 10 to 11, + // and 13 to 15 in the future without any impact on existing rules. + Priority int64 `json:"priority,omitempty"` + + // RouteAction: In response to a matching matchRule, the load balancer + // performs advanced routing actions like URL rewrites, header + // transformations, etc. prior to forwarding the request to the selected + // backend. If routeAction specifies any weightedBackendServices, + // service must not be set. Conversely if service is set, routeAction + // cannot contain any weightedBackendServices. + // Only one of routeAction or urlRedirect must be set. + RouteAction *HttpRouteAction `json:"routeAction,omitempty"` + + // Service: The full or partial URL of the backend service resource to + // which traffic is directed if this rule is matched. 
If routeAction is
 + // additionally specified, advanced routing actions like URL Rewrites,
 + // etc. take effect prior to sending the request to the backend.
 + // However, if service is specified, routeAction cannot contain any
 + // weightedBackendServices. Conversely, if routeAction specifies any
 + // weightedBackendServices, service must not be specified.
 + // Only one of urlRedirect, service or
 + // routeAction.weightedBackendService must be set.
 + Service string `json:"service,omitempty"`
 +
 + // UrlRedirect: When this rule is matched, the request is redirected to
 + // a URL specified by urlRedirect.
 + // If urlRedirect is specified, service or routeAction must not be set.
 + UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"`
 +
 + // ForceSendFields is a list of field names (e.g. "Description") to
 + // unconditionally include in API requests. By default, fields with
 + // empty values are omitted from API requests. However, any non-pointer,
 + // non-interface field appearing in ForceSendFields will be sent to the
 + // server regardless of whether the field is empty or not. This may be
 + // used to include empty fields in Patch requests.
 + ForceSendFields []string `json:"-"`
 +
 + // NullFields is a list of field names (e.g. "Description") to include
 + // in API requests with the JSON null value. By default, fields with
 + // empty values are omitted from API requests. However, any field with
 + // an empty value appearing in NullFields will be sent to the server as
 + // null. It is an error if a field in this list has a non-empty value.
 + // This may be used to include null fields in Patch requests.
 + NullFields []string `json:"-"`
 +}
 +
 +func (s *HttpRouteRule) MarshalJSON() ([]byte, error) {
 + type NoMethod HttpRouteRule
 + raw := NoMethod(*s)
 + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 +}
 +
 +// HttpRouteRuleMatch: HttpRouteRuleMatch specifies a set of criteria
 +// for matching requests to an HttpRouteRule. All specified criteria
 +// must be satisfied for a match to occur.
 +type HttpRouteRuleMatch struct {
 + // FullPathMatch: For satisfying the matchRule condition, the path of the
 + // request must exactly match the value specified in fullPathMatch after
 + // removing any query parameters and anchor that may be part of the
 + // original URL.
 + // FullPathMatch must be between 1 and 1024 characters.
 + // Only one of prefixMatch, fullPathMatch or regexMatch must be
 + // specified.
 + FullPathMatch string `json:"fullPathMatch,omitempty"`
 +
 + // HeaderMatches: Specifies a list of header match criteria, all of
 + // which must match corresponding headers in the request.
 + HeaderMatches []*HttpHeaderMatch `json:"headerMatches,omitempty"`
 +
 + // IgnoreCase: Specifies that prefixMatch and fullPathMatch matches are
 + // case sensitive.
 + // The default value is false.
 + // caseSensitive must not be used with regexMatch.
 + IgnoreCase bool `json:"ignoreCase,omitempty"`
 +
 + // MetadataFilters: Opaque filter criteria used by Loadbalancer to
 + // restrict routing configuration to a limited set of xDS compliant
 + // clients. In their xDS requests to Loadbalancer, xDS clients present
 + // node metadata. If a match takes place, the relevant routing
 + // configuration is made available to those proxies.
 + // For each metadataFilter in this list, if its filterMatchCriteria is
 + // set to MATCH_ANY, at least one of the filterLabels must match the
 + // corresponding label provided in the metadata. 
If its + // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels + // must match with corresponding labels in the provided + // metadata. + // metadataFilters specified here can be overrides those specified in + // ForwardingRule that refers to this UrlMap. + // metadataFilters only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + + // PrefixMatch: For satifying the matchRule condition, the request's + // path must begin with the specified prefixMatch. prefixMatch must + // begin with a /. + // The value must be between 1 and 1024 characters. + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + PrefixMatch string `json:"prefixMatch,omitempty"` + + // QueryParameterMatches: Specifies a list of query parameter match + // criteria, all of which must match corresponding query parameters in + // the request. + QueryParameterMatches []*HttpQueryParameterMatch `json:"queryParameterMatches,omitempty"` + + // RegexMatch: For satifying the matchRule condition, the path of the + // request must satisfy the regular expression specified in regexMatch + // after removing any query parameters and anchor supplied with the + // original URL. For regular expression grammar please see + // en.cppreference.com/w/cpp/regex/ecmascript + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + RegexMatch string `json:"regexMatch,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FullPathMatch") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FullPathMatch") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteRuleMatch) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteRuleMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpsHealthCheck: Represents a legacy HTTPS Health Check +// resource. +// +// Legacy health checks are required by network load balancers. For more +// information, read Health Check Concepts. type HttpsHealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -9357,7 +11667,10 @@ func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Image: An Image resource. (== resource_for beta.images ==) (== +// Image: Represents an Image resource. +// +// You can use images to create boot disks for your VM instances. For +// more information, read Images. 
(== resource_for beta.images ==) (== // resource_for v1.images ==) type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google @@ -9568,8 +11881,9 @@ type ImageRawDisk struct { // "TAR" ContainerType string `json:"containerType,omitempty"` - // Sha1Checksum: An optional SHA1 checksum of the disk image before - // unpackaging provided by the client when the disk image is created. + // Sha1Checksum: [Deprecated] This field is deprecated. An optional SHA1 + // checksum of the disk image before unpackaging provided by the client + // when the disk image is created. Sha1Checksum string `json:"sha1Checksum,omitempty"` // Source: The full Google Cloud Storage URL where the disk image is @@ -9754,8 +12068,11 @@ func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Instance: An Instance resource. (== resource_for beta.instances ==) -// (== resource_for v1.instances ==) +// Instance: Represents an Instance resource. +// +// An instance is a virtual machine that is hosted on Google Cloud +// Platform. For more information, read Virtual Machine Instances. (== +// resource_for beta.instances ==) (== resource_for v1.instances ==) type Instance struct { // CanIpForward: Allows this instance to send and receive packets with // non-matching destination or source IPs. This is required if you plan @@ -9782,10 +12099,18 @@ type Instance struct { // must be created before you can assign them. Disks []*AttachedDisk `json:"disks,omitempty"` + // DisplayDevice: Enables display device for the instance. + DisplayDevice *DisplayDevice `json:"displayDevice,omitempty"` + // GuestAccelerators: A list of the type and count of accelerator cards // attached to the instance. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` + // Hostname: Specifies the hostname of the instance. The specified + // hostname must be RFC1035 compliant. If hostname is not specified, the + // default hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal when + // using the global DNS, and + // [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS. Hostname string `json:"hostname,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -9860,6 +12185,10 @@ type Instance struct { // Multiple interfaces are supported per instance. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // ReservationAffinity: Specifies the reservations that this instance + // can consume from. + ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // Scheduling: Sets the scheduling options for this instance. Scheduling *Scheduling `json:"scheduling,omitempty"` @@ -9947,7 +12276,7 @@ type InstanceAggregatedList struct { // server. Id string `json:"id,omitempty"` - // Items: A list of InstancesScopedList resources. + // Items: An object that contains a list of instances scoped by zone. Items map[string]InstancesScopedList `json:"items,omitempty"` // Kind: [Output Only] Type of resource. Always @@ -10098,10 +12427,18 @@ func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceGroup: InstanceGroups (== resource_for beta.instanceGroups -// ==) (== resource_for v1.instanceGroups ==) (== resource_for -// beta.regionInstanceGroups ==) (== resource_for -// v1.regionInstanceGroups ==) +// InstanceGroup: Represents an unmanaged Instance Group resource. 
+// +// Use unmanaged instance groups if you need to apply load balancing to +// groups of heterogeneous instances or if you need to manage the +// instances yourself. For more information, read Instance groups. +// +// For zonal unmanaged Instance Group, use instanceGroups resource. +// +// For regional unmanaged Instance Group, use regionInstanceGroups +// resource. (== resource_for beta.instanceGroups ==) (== resource_for +// v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) +// (== resource_for v1.regionInstanceGroups ==) type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. @@ -10503,8 +12840,18 @@ func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceGroupManager: An Instance Group Manager resource. (== -// resource_for beta.instanceGroupManagers ==) (== resource_for +// InstanceGroupManager: Represents a Managed Instance Group +// resource. +// +// An instance group is a collection of VM instances that you can manage +// as a single entity. For more information, read Instance groups. +// +// For zonal Managed Instance Group, use the instanceGroupManagers +// resource. +// +// For regional Managed Instance Group, use the +// regionInstanceGroupManagers resource. (== resource_for +// beta.instanceGroupManagers ==) (== resource_for // v1.instanceGroupManagers ==) (== resource_for // beta.regionInstanceGroupManagers ==) (== resource_for // v1.regionInstanceGroupManagers ==) @@ -11143,6 +13490,13 @@ type InstanceGroupManagerUpdatePolicy struct { // "RESTART" MinimalAction string `json:"minimalAction,omitempty"` + // Type: The type of update process. You can specify either PROACTIVE so + // that the instance group manager proactively executes actions in order + // to bring instances to their target versions or OPPORTUNISTIC so that + // no action is proactively executed but the update will be performed as + // part of other actions (for example, resizes or recreateInstances + // calls). + // // Possible values: // "OPPORTUNISTIC" // "PROACTIVE" @@ -12363,6 +14717,10 @@ type InstanceProperties struct { // interface. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // ReservationAffinity: Specifies the reservations that this instance + // can consume from. + ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // Scheduling: Specifies the scheduling options for the instances that // are created from this template. Scheduling *Scheduling `json:"scheduling,omitempty"` @@ -12431,8 +14789,12 @@ func (s *InstanceReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceTemplate: An Instance Template resource. (== resource_for -// beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==) +// InstanceTemplate: Represents an Instance Template resource. +// +// You can use instance templates to create VM instances and managed +// instance groups. For more information, read Instance Templates. (== +// resource_for beta.instanceTemplates ==) (== resource_for +// v1.instanceTemplates ==) type InstanceTemplate struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance template in RFC3339 text format. 
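The hunk above adds the Type field to InstanceGroupManagerUpdatePolicy (PROACTIVE vs. OPPORTUNISTIC update behavior). As a rough illustration of how a consumer of the regenerated client might exercise that field, here is a minimal sketch; it is not part of this diff, it assumes the package is importable as google.golang.org/api/compute/v1 and that the instanceGroupManagers.patch method is available in this API surface, and "my-project", "us-central1-a", and "my-mig" are placeholder values.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// PROACTIVE asks the managed instance group to actively roll
	// instances to their target version; OPPORTUNISTIC would only apply
	// the update as part of other actions such as a resize or an
	// explicit recreateInstances call. RESTART is one of the
	// MinimalAction values listed in the generated code above.
	igm := &compute.InstanceGroupManager{
		UpdatePolicy: &compute.InstanceGroupManagerUpdatePolicy{
			Type:          "PROACTIVE",
			MinimalAction: "RESTART",
		},
	}

	op, err := svc.InstanceGroupManagers.Patch("my-project", "us-central1-a", "my-mig", igm).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation started: %s", op.Name)
}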
@@ -13024,11 +15386,46 @@ func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Interconnect: Represents an Interconnects resource. The Interconnects -// resource is a dedicated connection between Google's network and your -// on-premises network. For more information, see the Dedicated -// overview page. (== resource_for v1.interconnects ==) (== resource_for -// beta.interconnects ==) +// Int64RangeMatch: HttpRouteRuleMatch criteria for field values that +// must stay within the specified integer range. +type Int64RangeMatch struct { + // RangeEnd: The end of the range (exclusive) in signed long integer + // format. + RangeEnd int64 `json:"rangeEnd,omitempty,string"` + + // RangeStart: The start of the range (inclusive) in signed long integer + // format. + RangeStart int64 `json:"rangeStart,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "RangeEnd") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RangeEnd") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + type NoMethod Int64RangeMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Interconnect: Represents an Interconnect resource. +// +// An Interconnect resource is a dedicated connection between the GCP +// network and your on-premises network. For more information, read the +// Dedicated Interconnect Overview. (== resource_for v1.interconnects +// ==) (== resource_for beta.interconnects ==) type Interconnect struct { // AdminEnabled: Administrative status of the interconnect. When this is // set to true, the Interconnect is functional and can carry traffic. @@ -13074,8 +15471,13 @@ type Interconnect struct { // InterconnectAttachments configured to use this Interconnect. InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` - // InterconnectType: Type of interconnect. Note that "IT_PRIVATE" has - // been deprecated in favor of "DEDICATED" + // InterconnectType: Type of interconnect, which can take one of the + // following values: + // - PARTNER: A partner-managed interconnection shared between customers + // though a partner. + // - DEDICATED: A dedicated physical interconnection with the customer. + // Note that a value IT_PRIVATE has been deprecated in favor of + // DEDICATED. // // Possible values: // "DEDICATED" @@ -13087,10 +15489,15 @@ type Interconnect struct { // for interconnects. Kind string `json:"kind,omitempty"` - // LinkType: Type of link requested. This field indicates speed of each - // of the links in the bundle, not the entire bundle. 
+ // LinkType: Type of link requested, which can take one of the following + // values: + // - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics + // - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note + // that this field indicates the speed of each of the links in the + // bundle, not the speed of the entire bundle. // // Possible values: + // "LINK_TYPE_ETHERNET_100G_LR" // "LINK_TYPE_ETHERNET_10G_LR" LinkType string `json:"linkType,omitempty"` @@ -13114,8 +15521,16 @@ type Interconnect struct { // Notifications. NocContactEmail string `json:"nocContactEmail,omitempty"` - // OperationalStatus: [Output Only] The current status of whether or not - // this Interconnect is functional. + // OperationalStatus: [Output Only] The current status of this + // Interconnect's functionality, which can take one of the following + // values: + // - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to + // use. Attachments may be provisioned on this Interconnect. + // - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "OS_ACTIVE" @@ -13139,8 +15554,15 @@ type Interconnect struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // State: [Output Only] The current state of whether or not this - // Interconnect is functional. + // State: [Output Only] The current state of Interconnect functionality, + // which can take one of the following values: + // - ACTIVE: The Interconnect is valid, turned up and ready to use. + // Attachments may be provisioned on this Interconnect. + // - UNPROVISIONED: The Interconnect has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - UNDER_MAINTENANCE: The Interconnect is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "ACTIVE" @@ -13174,29 +15596,49 @@ func (s *Interconnect) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachment: Represents an InterconnectAttachment (VLAN -// attachment) resource. For more information, see Creating VLAN -// Attachments. (== resource_for beta.interconnectAttachments ==) (== -// resource_for v1.interconnectAttachments ==) +// InterconnectAttachment: Represents an Interconnect Attachment (VLAN) +// resource. +// +// You can use Interconnect attachments (VLANS) to connect your Virtual +// Private Cloud networks to your on-premises networks through an +// Interconnect. For more information, read Creating VLAN Attachments. +// (== resource_for beta.interconnectAttachments ==) (== resource_for +// v1.interconnectAttachments ==) type InterconnectAttachment struct { // AdminEnabled: Determines whether this Attachment will carry packets. // Not present for PARTNER_PROVIDER. AdminEnabled bool `json:"adminEnabled,omitempty"` - // Bandwidth: Provisioned bandwidth capacity for the - // interconnectAttachment. Can be set by the partner to update the - // customer's provisioned bandwidth. Output only for PARTNER type, - // mutable for PARTNER_PROVIDER and DEDICATED. + // Bandwidth: Provisioned bandwidth capacity for the interconnect + // attachment. 
For attachments of type DEDICATED, the user can set the + // bandwidth. For attachments of type PARTNER, the Google Partner that + // is operating the interconnect must set the bandwidth. Output only for + // PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can + // take one of the following values: + // - BPS_50M: 50 Mbit/s + // - BPS_100M: 100 Mbit/s + // - BPS_200M: 200 Mbit/s + // - BPS_300M: 300 Mbit/s + // - BPS_400M: 400 Mbit/s + // - BPS_500M: 500 Mbit/s + // - BPS_1G: 1 Gbit/s + // - BPS_2G: 2 Gbit/s + // - BPS_5G: 5 Gbit/s + // - BPS_10G: 10 Gbit/s + // - BPS_20G: 20 Gbit/s + // - BPS_50G: 50 Gbit/s // // Possible values: // "BPS_100M" // "BPS_10G" // "BPS_1G" // "BPS_200M" + // "BPS_20G" // "BPS_2G" // "BPS_300M" // "BPS_400M" // "BPS_500M" + // "BPS_50G" // "BPS_50M" // "BPS_5G" Bandwidth string `json:"bandwidth,omitempty"` @@ -13229,12 +15671,16 @@ type InterconnectAttachment struct { Description string `json:"description,omitempty"` // EdgeAvailabilityDomain: Desired availability domain for the - // attachment. Only available for type PARTNER, at creation time. For - // improved reliability, customers should configure a pair of - // attachments with one per availability domain. The selected - // availability domain will be provided to the Partner via the pairing - // key so that the provisioned circuit will lie in the specified domain. - // If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. + // attachment. Only available for type PARTNER, at creation time, and + // can take one of the following values: + // - AVAILABILITY_DOMAIN_ANY + // - AVAILABILITY_DOMAIN_1 + // - AVAILABILITY_DOMAIN_2 For improved reliability, customers should + // configure a pair of attachments, one per availability domain. The + // selected availability domain will be provided to the Partner via the + // pairing key, so that the provisioned circuit will lie in the + // specified domain. If not specified, the value will default to + // AVAILABILITY_DOMAIN_ANY. // // Possible values: // "AVAILABILITY_DOMAIN_1" @@ -13244,7 +15690,7 @@ type InterconnectAttachment struct { // GoogleReferenceId: [Output Only] Google reference ID, to be used when // raising support tickets with Google or otherwise to debug backend - // connectivity issues. + // connectivity issues. [Deprecated] This field is not used. GoogleReferenceId string `json:"googleReferenceId,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -13269,7 +15715,12 @@ type InterconnectAttachment struct { Name string `json:"name,omitempty"` // OperationalStatus: [Output Only] The current status of whether or not - // this interconnect attachment is functional. + // this interconnect attachment is functional, which can take one of the + // following values: + // - OS_ACTIVE: The attachment has been turned up and is ready to use. + // + // - OS_UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. // // Possible values: // "OS_ACTIVE" @@ -13282,10 +15733,10 @@ type InterconnectAttachment struct { // selected partner. Of the form "XXXXX/region/domain" PairingKey string `json:"pairingKey,omitempty"` - // PartnerAsn: Optional BGP ASN for the router that should be supplied - // by a layer 3 Partner if they configured BGP on behalf of the - // customer. Output only for PARTNER type, input only for - // PARTNER_PROVIDER, not available for DEDICATED. 
+ // PartnerAsn: Optional BGP ASN for the router supplied by a Layer 3 + // Partner if they configured BGP on behalf of the customer. Output only + // for PARTNER type, input only for PARTNER_PROVIDER, not available for + // DEDICATED. PartnerAsn int64 `json:"partnerAsn,omitempty,string"` // PartnerMetadata: Informational metadata about Partner attachments @@ -13314,7 +15765,26 @@ type InterconnectAttachment struct { SelfLink string `json:"selfLink,omitempty"` // State: [Output Only] The current state of this attachment's - // functionality. + // functionality. Enum values ACTIVE and UNPROVISIONED are shared by + // DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect + // attachments, while enum values PENDING_PARTNER, + // PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only + // PARTNER and PARTNER_PROVIDER interconnect attachments. This state can + // take one of the following values: + // - ACTIVE: The attachment has been turned up and is ready to use. + // - UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. + // - PENDING_PARTNER: A newly-created PARTNER attachment that has not + // yet been configured on the Partner side. + // - PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of + // provisioning after a PARTNER_PROVIDER attachment was created that + // references it. + // - PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is + // waiting for a customer to activate it. + // - DEFUNCT: The attachment was deleted externally and is no longer + // functional. This could be because the associated Interconnect was + // removed, or because the other side of a Partner attachment was + // deleted. // // Possible values: // "ACTIVE" @@ -13326,6 +15796,14 @@ type InterconnectAttachment struct { // "UNPROVISIONED" State string `json:"state,omitempty"` + // Type: The type of interconnect attachment this is, which can take one + // of the following values: + // - DEDICATED: an attachment to a Dedicated Interconnect. + // - PARTNER: an attachment to a Partner Interconnect, created by the + // customer. + // - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created + // by the partner. + // // Possible values: // "DEDICATED" // "PARTNER" @@ -13694,7 +16172,7 @@ type InterconnectAttachmentPartnerMetadata struct { PartnerName string `json:"partnerName,omitempty"` // PortalUrl: URL of the Partner?s portal for this Attachment. Partners - // may customise this to be a deep-link to the specific resource on the + // may customise this to be a deep link to the specific resource on the // Partner portal. This value may be validated to match approved Partner // values. PortalUrl string `json:"portalUrl,omitempty"` @@ -14010,6 +16488,12 @@ type InterconnectDiagnosticsLinkLACPStatus struct { // LACP exchange. NeighborSystemId string `json:"neighborSystemId,omitempty"` + // State: The state of a LACP link, which can take one of the following + // values: + // - ACTIVE: The link is configured and active within the bundle. + // - DETACHED: The link is not configured within the bundle. This means + // that the rest of the object should be empty. + // // Possible values: // "ACTIVE" // "DETACHED" @@ -14308,10 +16792,12 @@ func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectLocation: Represents an InterconnectLocations resource. 
-// The InterconnectLocations resource describes the locations where you -// can connect to Google's networks. For more information, see -// Colocation Facilities. +// InterconnectLocation: Represents an Interconnect Attachment (VLAN) +// Location resource. +// +// You can use this resource to find location details about an +// Interconnect attachment (VLAN). For more information about +// interconnect attachments, read Creating VLAN Attachments. type InterconnectLocation struct { // Address: [Output Only] The postal address of the Point of Presence, // each line in the address is separated by a newline character. @@ -14328,7 +16814,13 @@ type InterconnectLocation struct { // "Amsterdam, Netherlands". City string `json:"city,omitempty"` - // Continent: [Output Only] Continent for this location. + // Continent: [Output Only] Continent for this location, which can take + // one of the following values: + // - AFRICA + // - ASIA_PAC + // - EUROPE + // - NORTH_AMERICA + // - SOUTH_AMERICA // // Possible values: // "AFRICA" @@ -14381,10 +16873,12 @@ type InterconnectLocation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of this InterconnectLocation. If the - // status is AVAILABLE, new Interconnects may be provisioned in this - // InterconnectLocation. Otherwise, no new Interconnects may be - // provisioned. + // Status: [Output Only] The status of this InterconnectLocation, which + // can take one of the following values: + // - CLOSED: The InterconnectLocation is closed and is unavailable for + // provisioning new Interconnects. + // - AVAILABLE: The InterconnectLocation is available for provisioning + // new Interconnects. // // Possible values: // "AVAILABLE" @@ -14632,9 +17126,14 @@ type InterconnectOutageNotification struct { // epoch). EndTime int64 `json:"endTime,omitempty,string"` - // IssueType: Form this outage is expected to take. Note that the "IT_" - // versions of this enum have been deprecated in favor of the unprefixed - // values. + // IssueType: Form this outage is expected to take, which can take one + // of the following values: + // - OUTAGE: The Interconnect may be completely out of service for some + // or all of the specified window. + // - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a + // whole should remain up, but with reduced bandwidth. Note that the + // versions of this enum prefixed with "IT_" have been deprecated in + // favor of the unprefixed values. // // Possible values: // "IT_OUTAGE" @@ -14646,8 +17145,10 @@ type InterconnectOutageNotification struct { // Name: Unique identifier for this outage notification. Name string `json:"name,omitempty"` - // Source: The party that generated this notification. Note that - // "NSRC_GOOGLE" has been deprecated in favor of "GOOGLE" + // Source: The party that generated this notification, which can take + // the following value: + // - GOOGLE: this notification as generated by Google. Note that the + // value of NSRC_GOOGLE has been deprecated in favor of GOOGLE. // // Possible values: // "GOOGLE" @@ -14658,12 +17159,20 @@ type InterconnectOutageNotification struct { // Unix epoch). StartTime int64 `json:"startTime,omitempty,string"` - // State: State of this notification. Note that the "NS_" versions of - // this enum have been deprecated in favor of the unprefixed values. 
+ // State: State of this notification, which can take one of the + // following values: + // - ACTIVE: This outage notification is active. The event could be in + // the past, present, or future. See start_time and end_time for + // scheduling. + // - CANCELLED: The outage associated with this notification was + // cancelled before the outage was due to start. Note that the versions + // of this enum prefixed with "NS_" have been deprecated in favor of the + // unprefixed values. // // Possible values: // "ACTIVE" // "CANCELLED" + // "COMPLETED" // "NS_ACTIVE" // "NS_CANCELED" State string `json:"state,omitempty"` @@ -15170,19 +17679,22 @@ func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { // // Examples: counter { metric: "/debug_access_count" field: // "iam_principal" } ==> increment counter -// /iam/policy/backend_debug_access_count {iam_principal=[value of +// /iam/policy/debug_access_count {iam_principal=[value of // IAMContext.principal]} // -// At this time we do not support multiple field names (though this may -// be supported in the future). +// TODO(b/141846426): Consider supporting "authority" and +// "iam_principal" fields in the same counter. type LogConfigCounterOptions struct { + // CustomFields: Custom fields. + CustomFields []*LogConfigCounterOptionsCustomField `json:"customFields,omitempty"` + // Field: The field value to attribute. Field string `json:"field,omitempty"` // Metric: The metric to update. Metric string `json:"metric,omitempty"` - // ForceSendFields is a list of field names (e.g. "Field") to + // ForceSendFields is a list of field names (e.g. "CustomFields") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15190,10 +17702,10 @@ type LogConfigCounterOptions struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Field") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "CustomFields") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -15205,18 +17717,47 @@ func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LogConfigCounterOptionsCustomField: Custom fields. These can be used +// to create a counter with arbitrary field/value pairs. See: +// go/rpcsp-custom-fields. +type LogConfigCounterOptionsCustomField struct { + // Name: Name is the field name. + Name string `json:"name,omitempty"` + + // Value: Value is the field value. It is important that in contrast to + // the CounterOptions.field, the value here is a constant that is not + // derived from the IAMContext. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LogConfigCounterOptionsCustomField) MarshalJSON() ([]byte, error) { + type NoMethod LogConfigCounterOptionsCustomField + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LogConfigDataAccessOptions: Write a Data Access (Gin) log type LogConfigDataAccessOptions struct { // LogMode: Whether Gin logging should happen in a fail-closed manner at // the caller. This is relevant only in the LocalIAM implementation, for // now. // - // NOTE: Logging to Gin in a fail-closed manner is currently unsupported - // while work is being done to satisfy the requirements of go/345. - // Currently, setting LOG_FAIL_CLOSED mode will have no effect, but - // still exists because there is active work being done to support it - // (b/115874152). - // // Possible values: // "LOG_FAIL_CLOSED" // "LOG_MODE_UNSPECIFIED" @@ -15245,8 +17786,12 @@ func (s *LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MachineType: A Machine Type resource. (== resource_for -// v1.machineTypes ==) (== resource_for beta.machineTypes ==) +// MachineType: Represents a Machine Type resource. +// +// You can use specific machine types for your VM instances based on +// performance and pricing requirements. For more information, read +// Machine Types. (== resource_for v1.machineTypes ==) (== resource_for +// beta.machineTypes ==) type MachineType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -16107,6 +18652,103 @@ func (s *MetadataItems) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MetadataFilter: Opaque filter criteria used by loadbalancers to +// restrict routing configuration to a limited set of loadbalancing +// proxies. Proxies and sidecars involved in loadbalancing would +// typically present metadata to the loadbalancers which need to match +// criteria specified here. If a match takes place, the relevant routing +// configuration is made available to those proxies. +// For each metadataFilter in this list, if its filterMatchCriteria is +// set to MATCH_ANY, at least one of the filterLabels must match the +// corresponding label provided in the metadata. If its +// filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels +// must match with corresponding labels in the provided metadata. 
+// An example for using metadataFilters would be: if loadbalancing +// involves Envoys, they will only receive routing configuration when +// values in metadataFilters match values supplied in /global/gateways/default-internet-gateway + // projects/project/global/gateways/default-internet-gateway NextHopGateway string `json:"nextHopGateway,omitempty"` + // NextHopIlb: The URL to a forwarding rule of type + // loadBalancingScheme=INTERNAL that should handle matching packets. You + // can only specify the forwarding rule as a partial or full URL. For + // example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule + // - regions/region/forwardingRules/forwardingRule + NextHopIlb string `json:"nextHopIlb,omitempty"` + // NextHopInstance: The URL to an instance that should handle matching // packets. You can specify this as a full or partial URL. For // example: @@ -22277,9 +26764,9 @@ type Route struct { // Priority: The priority of this route. Priority is used to break ties // in cases where there is more than one matching route of equal prefix - // length. In the case of two routes with equal prefix length, the one - // with the lowest-numbered priority value wins. Default value is 1000. - // Valid range is 0 through 65535. + // length. In cases where multiple routes have equal prefix length, the + // one with the lowest-numbered priority value wins. The default value + // is `1000`. The priority value must be from `0` to `65535`, inclusive. Priority int64 `json:"priority,omitempty"` // SelfLink: [Output Only] Server-defined fully-qualified URL for this @@ -22575,14 +27062,18 @@ func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Router: Router resource. +// Router: Represents a Cloud Router resource. +// +// For more information about Cloud Router, read the the Cloud Router +// overview. type Router struct { // Bgp: BGP information specific to this router. Bgp *RouterBgp `json:"bgp,omitempty"` - // BgpPeers: BGP information that needs to be configured into the - // routing stack to establish the BGP peering. It must specify peer ASN - // and either interface name, IP, or peer IP. Please refer to RFC4273. + // BgpPeers: BGP information that must be configured into the routing + // stack to establish BGP peering. This information must specify the + // peer ASN and either the interface name, IP address, or peer IP + // address. Please refer to RFC4273. BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -22598,8 +27089,8 @@ type Router struct { Id uint64 `json:"id,omitempty,string"` // Interfaces: Router interfaces. Each interface requires either one - // linked resource (e.g. linkedVpnTunnel), or IP address and IP address - // range (e.g. ipRange), or both. + // linked resource, (for example, linkedVpnTunnel), or IP address and IP + // address range (for example, ipRange), or both. Interfaces []*RouterInterface `json:"interfaces,omitempty"` // Kind: [Output Only] Type of resource. Always compute#router for @@ -22615,7 +27106,7 @@ type Router struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Nats: A list of Nat services created in this router. + // Nats: A list of NAT services created in this router. 
Nats []*RouterNat `json:"nats,omitempty"` // Network: URI of the network to which this router belongs. @@ -22846,7 +27337,7 @@ func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { type RouterBgp struct { // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. + // advertisement. The options are DEFAULT or CUSTOM. // // Possible values: // "CUSTOM" @@ -22910,11 +27401,15 @@ type RouterBgpPeer struct { AdvertiseMode string `json:"advertiseMode,omitempty"` // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and overrides the list defined for the router (in Bgp - // message). These groups will be advertised in addition to any - // specified prefixes. Leave this field blank to advertise no custom - // groups. + // in custom mode, which can take one of the following options: + // - ALL_SUBNETS: Advertises all available subnets, including peer VPC + // subnets. + // - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. + // - ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC + // network. Note that this field can only be populated if advertise_mode + // is CUSTOM and overrides the list defined for the router (in the "bgp" + // message). These groups are advertised in addition to any specified + // prefixes. Leave this field blank to advertise no custom groups. // // Possible values: // "ALL_SUBNETS" @@ -22923,14 +27418,14 @@ type RouterBgpPeer struct { // AdvertisedIpRanges: User-specified list of individual IP ranges to // advertise in custom mode. This field can only be populated if // advertise_mode is CUSTOM and overrides the list defined for the - // router (in Bgp message). These IP ranges will be advertised in + // router (in the "bgp" message). These IP ranges are advertised in // addition to any specified groups. Leave this field blank to advertise // no custom IP ranges. AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` // AdvertisedRoutePriority: The priority of routes advertised to this - // BGP peer. In the case where there is more than one matching route of - // maximum length, the routes with lowest priority value win. + // BGP peer. Where there is more than one matching route of maximum + // length, the routes with the lowest priority value win. AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` // InterfaceName: Name of the interface the BGP peer is associated with. @@ -22941,28 +27436,35 @@ type RouterBgpPeer struct { IpAddress string `json:"ipAddress,omitempty"` // ManagementType: [Output Only] The resource that configures and - // manages this BGP peer. MANAGED_BY_USER is the default value and can - // be managed by you or other users; MANAGED_BY_ATTACHMENT is a BGP peer - // that is configured and managed by Cloud Interconnect, specifically by - // an InterconnectAttachment of type PARTNER. Google will automatically - // create, update, and delete this type of BGP peer when the PARTNER - // InterconnectAttachment is created, updated, or deleted. + // manages this BGP peer. + // - MANAGED_BY_USER is the default value and can be managed by you or + // other users + // - MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed + // by Cloud Interconnect, specifically by an InterconnectAttachment of + // type PARTNER. 
Google automatically creates, updates, and deletes this + // type of BGP peer when the PARTNER InterconnectAttachment is created, + // updated, or deleted. // // Possible values: // "MANAGED_BY_ATTACHMENT" // "MANAGED_BY_USER" ManagementType string `json:"managementType,omitempty"` - // Name: Name of this BGP peer. The name must be 1-63 characters long - // and comply with RFC1035. + // Name: Name of this BGP peer. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. Name string `json:"name,omitempty"` - // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, - // this value can be different for every tunnel. + // PeerAsn: Peer BGP Autonomous System Number (ASN). Each BGP interface + // may use a different value. PeerAsn int64 `json:"peerAsn,omitempty"` - // PeerIpAddress: IP address of the BGP interface outside Google cloud. - // Only IPv4 is supported. + // PeerIpAddress: IP address of the BGP interface outside Google Cloud + // Platform. Only IPv4 is supported. PeerIpAddress string `json:"peerIpAddress,omitempty"` // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to @@ -22990,32 +27492,33 @@ func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { type RouterInterface struct { // IpRange: IP address and range of the interface. The IP range must be - // in the RFC3927 link-local IP space. The value must be a + // in the RFC3927 link-local IP address space. The value must be a // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not // truncate the address as it represents the IP address of the // interface. IpRange string `json:"ipRange,omitempty"` - // LinkedInterconnectAttachment: URI of the linked interconnect + // LinkedInterconnectAttachment: URI of the linked Interconnect // attachment. It must be in the same region as the router. Each - // interface can have at most one linked resource and it could either be - // a VPN Tunnel or an interconnect attachment. + // interface can have one linked resource, which can be either be a VPN + // tunnel or an Interconnect attachment. LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` - // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same - // region as the router. Each interface can have at most one linked - // resource and it could either be a VPN Tunnel or an interconnect + // LinkedVpnTunnel: URI of the linked VPN tunnel, which must be in the + // same region as the router. Each interface can have one linked + // resource, which can be either a VPN tunnel or an Interconnect // attachment. LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` // ManagementType: [Output Only] The resource that configures and - // manages this interface. MANAGED_BY_USER is the default value and can - // be managed by you or other users; MANAGED_BY_ATTACHMENT is an - // interface that is configured and managed by Cloud Interconnect, - // specifically by an InterconnectAttachment of type PARTNER. Google - // will automatically create, update, and delete this type of interface - // when the PARTNER InterconnectAttachment is created, updated, or - // deleted. + // manages this interface. 
+ // - MANAGED_BY_USER is the default value and can be managed directly by + // users. + // - MANAGED_BY_ATTACHMENT is an interface that is configured and + // managed by Cloud Interconnect, specifically, by an + // InterconnectAttachment of type PARTNER. Google automatically creates, + // updates, and deletes this type of interface when the PARTNER + // InterconnectAttachment is created, updated, or deleted. // // Possible values: // "MANAGED_BY_ATTACHMENT" @@ -23023,7 +27526,12 @@ type RouterInterface struct { ManagementType string `json:"managementType,omitempty"` // Name: Name of this interface entry. The name must be 1-63 characters - // long and comply with RFC1035. + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "IpRange") to @@ -23211,22 +27719,36 @@ func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { // that would be used for NAT. GCP would auto-allocate ephemeral IPs if // no external IPs are provided. type RouterNat struct { + // DrainNatIps: A list of URLs of the IP resources to be drained. These + // IPs must be valid static external IPs that have been assigned to the + // NAT. These IPs should be used for updating/patching a NAT only. + DrainNatIps []string `json:"drainNatIps,omitempty"` + // IcmpIdleTimeoutSec: Timeout (in seconds) for ICMP connections. // Defaults to 30s if not set. IcmpIdleTimeoutSec int64 `json:"icmpIdleTimeoutSec,omitempty"` + // LogConfig: Configure logging on this NAT. + LogConfig *RouterNatLogConfig `json:"logConfig,omitempty"` + // MinPortsPerVm: Minimum number of ports allocated to a VM from this // NAT config. If not set, a default number of ports is allocated to a - // VM. This gets rounded up to the nearest power of 2. Eg. if the value - // of this field is 50, at least 64 ports will be allocated to a VM. + // VM. This is rounded up to the nearest power of 2. For example, if the + // value of this field is 50, at least 64 ports are allocated to a VM. MinPortsPerVm int64 `json:"minPortsPerVm,omitempty"` // Name: Unique name of this Nat service. The name must be 1-63 // characters long and comply with RFC1035. Name string `json:"name,omitempty"` - // NatIpAllocateOption: Specify the NatIpAllocateOption. If it is - // AUTO_ONLY, then nat_ip should be empty. + // NatIpAllocateOption: Specify the NatIpAllocateOption, which can take + // one of the following values: + // - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When + // there are not enough specified Nat IPs, the Nat service fails for new + // VMs. + // - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; + // customers can't specify any Nat IPs. When choosing AUTO_ONLY, then + // nat_ip should be empty. // // Possible values: // "AUTO_ONLY" @@ -23234,12 +27756,20 @@ type RouterNat struct { NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` // NatIps: A list of URLs of the IP resources used for this Nat service. - // These IPs must be valid static external IP addresses assigned to the - // project. max_length is subject to change post alpha. + // These IP addresses must be valid static external IP addresses + // assigned to the project. 
NatIps []string `json:"natIps,omitempty"` - // SourceSubnetworkIpRangesToNat: Specify the Nat option. If this field - // contains ALL_SUBNETWORKS_ALL_IP_RANGES or + // SourceSubnetworkIpRangesToNat: Specify the Nat option, which can take + // one of the following values: + // - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every + // Subnetwork are allowed to Nat. + // - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges + // in every Subnetwork are allowed to Nat. + // - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat + // (specified in the field subnetwork below) The default is + // SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this + // field contains ALL_SUBNETWORKS_ALL_IP_RANGES or // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any // other Router.Nat section in any Router for this network in this // region. @@ -23267,21 +27797,20 @@ type RouterNat struct { // to 30s if not set. UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` - // ForceSendFields is a list of field names (e.g. "IcmpIdleTimeoutSec") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "DrainNatIps") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IcmpIdleTimeoutSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "DrainNatIps") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -23291,10 +27820,52 @@ func (s *RouterNat) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RouterNatLogConfig: Configuration of logging on a NAT. +type RouterNatLogConfig struct { + // Enable: Indicates whether or not to export logs. This is false by + // default. + Enable bool `json:"enable,omitempty"` + + // Filter: Specify the desired filtering of logs on this NAT. If + // unspecified, logs are exported for all connections handled by this + // NAT. This option can take one of the following values: + // - ERRORS_ONLY: Export logs only for connection failures. + // - TRANSLATIONS_ONLY: Export logs only for successful connections. + // - ALL: Export logs for all connections, successful and unsuccessful. + // + // Possible values: + // "ALL" + // "ERRORS_ONLY" + // "TRANSLATIONS_ONLY" + Filter string `json:"filter,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enable") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterNatLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT // for a subnetwork. type RouterNatSubnetworkToNat struct { - // Name: URL for the subnetwork resource to use NAT. + // Name: URL for the subnetwork resource that will use NAT. Name string `json:"name,omitempty"` // SecondaryIpRangeNames: A list of the secondary ranges of the @@ -23304,7 +27875,7 @@ type RouterNatSubnetworkToNat struct { SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` // SourceIpRangesToNat: Specify the options for NAT ranges in the - // Subnetwork. All usages of single value are valid except + // Subnetwork. All options of a single value are valid except // NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] // Default: [ALL_IP_RANGES] @@ -23443,6 +28014,14 @@ type RouterStatusNatStatus struct { // ["1.1.1.1", "129.2.16.89"] AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` + // DrainAutoAllocatedNatIps: A list of IPs auto-allocated for NAT that + // are in drain mode. Example: ["1.1.1.1", "179.12.26.133"]. + DrainAutoAllocatedNatIps []string `json:"drainAutoAllocatedNatIps,omitempty"` + + // DrainUserAllocatedNatIps: A list of IPs user-allocated for NAT that + // are in drain mode. Example: ["1.1.1.1", "179.12.26.133"]. + DrainUserAllocatedNatIps []string `json:"drainUserAllocatedNatIps,omitempty"` + // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will // be greater than 0 only if user-specified IPs are NOT enough to allow // all configured VMs to use NAT. This value is meaningful only when @@ -23825,7 +28404,7 @@ func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. +// Scheduling: Sets the scheduling options for an Instance. NextID: 9 type Scheduling struct { // AutomaticRestart: Specifies whether the instance should be // automatically restarted if it is terminated by Compute Engine (not @@ -23837,7 +28416,9 @@ type Scheduling struct { // restarted if it is terminated by Compute Engine. AutomaticRestart *bool `json:"automaticRestart,omitempty"` - // NodeAffinities: A set of node affinity and anti-affinity. + // NodeAffinities: A set of node affinity and anti-affinity + // configurations. Refer to Configuring node affinity for more + // information. 
NodeAffinities []*SchedulingNodeAffinity `json:"nodeAffinities,omitempty"` // OnHostMaintenance: Defines the maintenance behavior for this @@ -23886,7 +28467,8 @@ type SchedulingNodeAffinity struct { // Key: Corresponds to the label key of Node resource. Key string `json:"key,omitempty"` - // Operator: Defines the operation of node selection. + // Operator: Defines the operation of node selection. Valid operators + // are IN for affinity and NOT_IN for anti-affinity. // // Possible values: // "IN" @@ -23920,10 +28502,13 @@ func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicy: A security policy is comprised of one or more rules. -// It can also be associated with one or more 'targets'. (== -// resource_for v1.securityPolicies ==) (== resource_for -// beta.securityPolicies ==) +// SecurityPolicy: Represents a Cloud Armor Security Policy +// resource. +// +// Only external backend services that use load balancers can reference +// a Security Policy. For more information, read Cloud Armor Security +// Policy Concepts. (== resource_for v1.securityPolicies ==) (== +// resource_for beta.securityPolicies ==) type SecurityPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -24353,6 +28938,36 @@ func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ServerBinding struct { + // Possible values: + // "RESTART_NODE_ON_ANY_SERVER" + // "RESTART_NODE_ON_MINIMAL_SERVERS" + // "SERVER_BINDING_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Type") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Type") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ServerBinding) MarshalJSON() ([]byte, error) { + type NoMethod ServerBinding + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ServiceAccount: A service account. type ServiceAccount struct { // Email: Email address of the service account. @@ -24425,8 +29040,8 @@ func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { // ShieldedInstanceIdentity: A shielded Instance identity entry. type ShieldedInstanceIdentity struct { - // EncryptionKey: An Endorsement Key (EK) issued to the Shielded - // Instance's vTPM. + // EncryptionKey: An Endorsement Key (EK) made by the RSA 2048 algorithm + // issued to the Shielded Instance's vTPM. EncryptionKey *ShieldedInstanceIdentityEntry `json:"encryptionKey,omitempty"` // Kind: [Output Only] Type of the resource. Always @@ -24434,8 +29049,8 @@ type ShieldedInstanceIdentity struct { // entry. 
Kind string `json:"kind,omitempty"` - // SigningKey: An Attestation Key (AK) issued to the Shielded Instance's - // vTPM. + // SigningKey: An Attestation Key (AK) made by the RSA 2048 algorithm + // issued to the Shielded Instance's vTPM. SigningKey *ShieldedInstanceIdentityEntry `json:"signingKey,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -24566,9 +29181,16 @@ func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: A persistent disk snapshot resource. (== resource_for -// beta.snapshots ==) (== resource_for v1.snapshots ==) +// Snapshot: Represents a Persistent Disk Snapshot resource. +// +// You can use snapshots to back up data on a regular interval. For more +// information, read Creating persistent disk snapshots. (== +// resource_for beta.snapshots ==) (== resource_for v1.snapshots ==) type Snapshot struct { + // AutoCreated: [Output Only] Set to true if snapshots are automatically + // by applying resource policy on the target disk. + AutoCreated bool `json:"autoCreated,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -24577,7 +29199,7 @@ type Snapshot struct { // property when you create the resource. Description string `json:"description,omitempty"` - // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. + // DiskSizeGb: [Output Only] Size of the source disk, specified in GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` // Id: [Output Only] The unique identifier for the resource. This @@ -24629,12 +29251,12 @@ type Snapshot struct { // customer-supplied encryption key. // // After you encrypt a snapshot using a customer-supplied key, you must - // provide the same key if you use the image later For example, you must - // provide the encryption key when you create a disk from the encrypted - // snapshot in a future request. + // provide the same key if you use the snapshot later. For example, you + // must provide the encryption key when you create a disk from the + // encrypted snapshot in a future request. // // Customer-supplied encryption keys do not protect access to metadata - // of the disk. + // of the snapshot. // // If you do not provide an encryption key when creating the snapshot, // then the snapshot will be encrypted using an automatically generated @@ -24683,29 +29305,28 @@ type Snapshot struct { // "UP_TO_DATE" StorageBytesStatus string `json:"storageBytesStatus,omitempty"` - // StorageLocations: GCS bucket storage location of the snapshot - // (regional or multi-regional). + // StorageLocations: Cloud Storage bucket storage location of the + // snapshot (regional or multi-regional). StorageLocations []string `json:"storageLocations,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AutoCreated") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AutoCreated") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -24902,10 +29523,13 @@ func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificate: An SslCertificate resource. This resource provides a -// mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. (== resource_for -// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) +// SslCertificate: Represents an SSL Certificate resource. +// +// This SSL certificate resource also contains a private key. You can +// use SSL keys and certificates to secure connections to a load +// balancer. For more information, read Creating and Using SSL +// Certificates. (== resource_for beta.sslCertificates ==) (== +// resource_for v1.sslCertificates ==) type SslCertificate struct { // Certificate: A local certificate file. The certificate must be in PEM // format. The certificate chain must be no greater than 5 certs long. @@ -24941,6 +29565,11 @@ type SslCertificate struct { // requests will include this field. PrivateKey string `json:"privateKey,omitempty"` + // Region: [Output Only] URL of the region where the regional SSL + // Certificate resides. This field is not applicable to global SSL + // Certificate. + Region string `json:"region,omitempty"` + // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -24971,6 +29600,161 @@ func (s *SslCertificate) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SslCertificateAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of SslCertificatesScopedList resources. + Items map[string]SslCertificatesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#sslCertificateAggregatedList for lists of SSL Certificates. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SslCertificateAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslCertificateAggregatedListWarning: [Output Only] Informational +// warning message. +type SslCertificateAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslCertificateAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslCertificateAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SslCertificateList: Contains a list of SslCertificate resources. type SslCertificateList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -25126,6 +29910,140 @@ func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SslCertificatesScopedList struct { + // SslCertificates: List of SslCertificates contained in this scope. + SslCertificates []*SslCertificate `json:"sslCertificates,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *SslCertificatesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificatesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslCertificatesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type SslCertificatesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslCertificatesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificatesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslCertificatesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SslPoliciesList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. @@ -25310,11 +30228,12 @@ func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPolicy: A SSL policy specifies the server-side support for SSL -// features. This can be attached to a TargetHttpsProxy or a -// TargetSslProxy. This affects connections between clients and the -// HTTPS or SSL proxy load balancer. They do not affect the connection -// between the load balancers and the backends. +// SslPolicy: Represents a Cloud Armor Security Policy resource. +// +// Only external backend services used by HTTP or HTTPS load balancers +// can reference a Security Policy. For more information, read read +// Cloud Armor Security Policy Concepts. (== resource_for +// beta.sslPolicies ==) (== resource_for v1.sslPolicies ==) type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -25549,8 +30468,13 @@ func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: A Subnetwork resource. (== resource_for beta.subnetworks -// ==) (== resource_for v1.subnetworks ==) +// Subnetwork: Represents a Subnetwork resource. +// +// A subnetwork (also known as a subnet) is a logical partition of a +// Virtual Private Cloud network with one primary IP range and zero or +// more secondary IP ranges. For more information, read Virtual Private +// Cloud (VPC) Network. (== resource_for beta.subnetworks ==) (== +// resource_for v1.subnetworks ==) type Subnetwork struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -25595,6 +30519,10 @@ type Subnetwork struct { // for Subnetwork resources. Kind string `json:"kind,omitempty"` + // LogConfig: This field denotes the VPC flow logging options for this + // subnetwork. If logging is enabled, logs are exported to Stackdriver. + LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` + // Name: The name of the resource, provided by the client when initially // creating the resource. The name must be 1-63 characters long, and // comply with RFC1035. Specifically, the name must be 1-63 characters @@ -25616,10 +30544,34 @@ type Subnetwork struct { // setPrivateIpGoogleAccess. PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + // Purpose: The purpose of the resource. This field can be either + // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with + // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created + // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If + // unspecified, the purpose defaults to PRIVATE_RFC_1918. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" + // "PRIVATE" + // "PRIVATE_RFC_1918" + Purpose string `json:"purpose,omitempty"` + // Region: URL of the region where the Subnetwork resides. This field // can be set only at resource creation time. Region string `json:"region,omitempty"` + // Role: The role of subnetwork. Currenly, this field is only used when + // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to + // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being + // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // + // Possible values: + // "ACTIVE" + // "BACKUP" + Role string `json:"role,omitempty"` + // SecondaryIpRanges: An array of configurations for secondary IP ranges // for VM instances contained in this subnetwork. The primary IP of such // VM must belong to the primary ipCidrRange of the subnetwork. The @@ -25630,6 +30582,19 @@ type Subnetwork struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // State: [Output Only] The state of the subnetwork, which can be one of + // READY or DRAINING. A subnetwork that is READY is ready to be used. + // The state of DRAINING is only applicable to subnetworks that have the + // purpose set to INTERNAL_HTTPS_LOAD_BALANCER and indicates that + // connections to the load balancer are being drained. A subnetwork that + // is draining cannot be used or modified until it reaches a status of + // READY. + // + // Possible values: + // "DRAINING" + // "READY" + State string `json:"state,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -25968,6 +30933,85 @@ func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SubnetworkLogConfig: The available logging options for this +// subnetwork. +type SubnetworkLogConfig struct { + // AggregationInterval: Can only be specified if VPC flow logging for + // this subnetwork is enabled. Toggles the aggregation interval for + // collecting flow logs. Increasing the interval time will reduce the + // amount of generated flow logs for long lasting connections. Default + // is an interval of 5 seconds per connection. 
+ // + // Possible values: + // "INTERVAL_10_MIN" + // "INTERVAL_15_MIN" + // "INTERVAL_1_MIN" + // "INTERVAL_30_SEC" + // "INTERVAL_5_MIN" + // "INTERVAL_5_SEC" + AggregationInterval string `json:"aggregationInterval,omitempty"` + + // Enable: Whether to enable flow logging for this subnetwork. If this + // field is not explicitly set, it will not appear in get listings. If + // not set the default behavior is to disable flow logging. + Enable bool `json:"enable,omitempty"` + + // FlowSampling: Can only be specified if VPC flow logging for this + // subnetwork is enabled. The value of the field must be in [0, 1]. Set + // the sampling rate of VPC flow logs within the subnetwork where 1.0 + // means all collected logs are reported and 0.0 means no logs are + // reported. Default is 0.5, which means half of all collected logs are + // reported. + FlowSampling float64 `json:"flowSampling,omitempty"` + + // Metadata: Can only be specified if VPC flow logs for this subnetwork + // is enabled. Configures whether all, none or a subset of metadata + // fields should be added to the reported VPC flow logs. Default is + // INCLUDE_ALL_METADATA. + // + // Possible values: + // "EXCLUDE_ALL_METADATA" + // "INCLUDE_ALL_METADATA" + Metadata string `json:"metadata,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AggregationInterval") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AggregationInterval") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *SubnetworkLogConfig) UnmarshalJSON(data []byte) error { + type NoMethod SubnetworkLogConfig + var s1 struct { + FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FlowSampling = float64(s1.FlowSampling) + return nil +} + // SubnetworkSecondaryRange: Represents a secondary IP range of a // subnetwork. type SubnetworkSecondaryRange struct { @@ -26317,9 +31361,148 @@ func (s *Tags) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== -// resource_for v1.targetHttpProxies ==) +type TargetHttpProxiesScopedList struct { + // TargetHttpProxies: A list of TargetHttpProxies contained in this + // scope. + TargetHttpProxies []*TargetHttpProxy `json:"targetHttpProxies,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. 
+ Warning *TargetHttpProxiesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetHttpProxies") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetHttpProxies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetHttpProxiesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpProxiesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpProxiesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxy: Represents a Target HTTP Proxy resource. +// +// A target HTTP proxy is a component of certain types of load +// balancers. Global forwarding rules reference a target HTTP proxy, and +// the target proxy then references a URL map. For more information, +// read Using Target Proxies. (== resource_for beta.targetHttpProxies +// ==) (== resource_for v1.targetHttpProxies ==) type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -26346,6 +31529,11 @@ type TargetHttpProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // Region: [Output Only] URL of the region where the regional Target + // HTTP Proxy resides. This field is not applicable to global Target + // HTTP Proxies. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -26381,6 +31569,57 @@ func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type TargetHttpProxyAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpProxiesScopedList resources. 
+ Items map[string]TargetHttpProxiesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetHttpProxyAggregatedList for lists of Target HTTP + // Proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TargetHttpProxyList: A list of TargetHttpProxy resources. type TargetHttpProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -26537,6 +31776,141 @@ func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type TargetHttpsProxiesScopedList struct { + // TargetHttpsProxies: A list of TargetHttpsProxies contained in this + // scope. + TargetHttpsProxies []*TargetHttpsProxy `json:"targetHttpsProxies,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetHttpsProxiesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetHttpsProxies") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetHttpsProxies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetHttpsProxiesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpsProxiesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxiesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetHttpsProxiesSetQuicOverrideRequest struct { // QuicOverride: QUIC policy for the TargetHttpsProxy resource. // @@ -26599,9 +31973,13 @@ func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, err return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== -// resource_for v1.targetHttpsProxies ==) +// TargetHttpsProxy: Represents a Target HTTPS Proxy resource. +// +// A target HTTPS proxy is a component of certain types of load +// balancers. Global forwarding rules reference a target HTTPS proxy, +// and the target proxy then references a URL map. For more information, +// read Using Target Proxies. (== resource_for beta.targetHttpsProxies +// ==) (== resource_for v1.targetHttpsProxies ==) type TargetHttpsProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -26629,13 +32007,17 @@ type TargetHttpsProxy struct { Name string `json:"name,omitempty"` // QuicOverride: Specifies the QUIC override policy for this - // TargetHttpsProxy resource. This determines whether the load balancer - // will attempt to negotiate QUIC with clients or not. Can specify one - // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, - // Enables QUIC when set to ENABLE, and disables QUIC when set to - // DISABLE. If NONE is specified, uses the QUIC policy with no user - // overrides, which is equivalent to DISABLE. Not specifying this field - // is equivalent to specifying NONE. + // TargetHttpsProxy resource. This setting determines whether the load + // balancer attempts to negotiate QUIC with clients. You can specify + // NONE, ENABLE, or DISABLE. + // - When quic-override is set to NONE, Google manages whether QUIC is + // used. + // - When quic-override is set to ENABLE, the load balancer uses QUIC + // when possible. 
+ // - When quic-override is set to DISABLE, the load balancer doesn't use + // QUIC. + // - If the quic-override flag is not specified, NONE is implied. + // - // // Possible values: // "DISABLE" @@ -26643,6 +32025,11 @@ type TargetHttpsProxy struct { // "NONE" QuicOverride string `json:"quicOverride,omitempty"` + // Region: [Output Only] URL of the region where the regional + // TargetHttpsProxy resides. This field is not applicable to global + // TargetHttpsProxies. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -26654,7 +32041,7 @@ type TargetHttpsProxy struct { // SslPolicy: URL of SslPolicy resource that will be associated with the // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource - // will not have any SSL policy configured. + // has no SSL policy configured. SslPolicy string `json:"sslPolicy,omitempty"` // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource @@ -26694,6 +32081,162 @@ func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type TargetHttpsProxyAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpsProxiesScopedList resources. + Items map[string]TargetHttpsProxiesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetHttpsProxyAggregatedList for lists of Target HTTP + // Proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpsProxyAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxyAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetHttpsProxyAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpsProxyAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxyAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. 
+ Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. type TargetHttpsProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -26850,10 +32393,13 @@ func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. (== -// resource_for beta.targetInstances ==) (== resource_for -// v1.targetInstances ==) +// TargetInstance: Represents a Target Instance resource. +// +// You can use a target instance to handle traffic for one or more +// forwarding rules, which is ideal for forwarding protocol traffic that +// is managed by a single source. For example, ESP, AH, TCP, or UDP. For +// more information, read Target instances. (== resource_for +// beta.targetInstances ==) (== resource_for v1.targetInstances ==) type TargetInstance struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -27376,10 +32922,13 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPool: A TargetPool resource. This resource defines a pool of -// instances, an associated HttpHealthCheck resource, and the fallback -// target pool. (== resource_for beta.targetPools ==) (== resource_for -// v1.targetPools ==) +// TargetPool: Represents a Target Pool resource. +// +// Target pools are used for network TCP/UDP load balancing. A target +// pool references member instances, an associated legacy +// HttpHealthCheck resource, and, optionally, a backup target pool. For +// more information, read Using target pools. 
(== resource_for + beta.targetPools ==) (== resource_for v1.targetPools ==) type TargetPool struct { // BackupPool: This field is applicable only when the containing target // pool is serving a forwarding rule as the primary pool, and its @@ -27476,6 +33025,8 @@ type TargetPool struct { // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" + // "HEADER_FIELD" + // "HTTP_COOKIE" // "NONE" SessionAffinity string `json:"sessionAffinity,omitempty"` @@ -28234,9 +33785,13 @@ func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. (== resource_for beta.targetSslProxies ==) (== -// resource_for v1.targetSslProxies ==) +// TargetSslProxy: Represents a Target SSL Proxy resource. +// +// A target SSL proxy is a component of a SSL Proxy load balancer. +// Global forwarding rules reference a target SSL proxy, and the target +// proxy then references an external backend service. For more +// information, read Using Target Proxies. (== resource_for +// beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==) type TargetSslProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -28532,9 +34087,13 @@ func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== -// resource_for v1.targetTcpProxies ==) +// TargetTcpProxy: Represents a Target TCP Proxy resource. +// +// A target TCP proxy is a component of a TCP Proxy load balancer. +// Global forwarding rules reference target TCP proxy, and the target +// proxy then references an external backend service. For more +// information, read TCP Proxy Load Balancing Concepts. (== resource_for +// beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==) type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -28759,7 +34318,10 @@ func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN gateway resource. (== +// TargetVpnGateway: Represents a Target VPN Gateway resource. +// +// The target VPN gateway resource represents a Classic Cloud VPN +// gateway. For more information, read the Cloud VPN Overview. (== // resource_for beta.targetVpnGateways ==) (== resource_for // v1.targetVpnGateways ==) type TargetVpnGateway struct { @@ -29387,14 +34949,30 @@ func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMap: A UrlMap resource. This resource defines the mapping from URL -// to the BackendService resource, based on the "longest-match" of the -// URL's host and path. +// UrlMap: Represents a URL Map resource. +// +// A URL map resource is a component of certain types of load balancers. +// This resource defines mappings from host names and URL paths to +// either a backend service or a backend bucket. +// +// To use this resource, the backend service must have a +// loadBalancingScheme of either EXTERNAL, INTERNAL_SELF_MANAGED, or +// INTERNAL_MANAGED. For more information, read URL Map Concepts.
type UrlMap struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // DefaultRouteAction: defaultRouteAction takes effect when none of the + // hostRules match. The load balancer performs advanced routing actions + // like URL rewrites, header transformations, etc. prior to forwarding + // the request to the selected backend. If defaultRouteAction specifies + // any weightedBackendServices, defaultService must not be set. + // Conversely if defaultService is set, defaultRouteAction cannot + // contain any weightedBackendServices. + // Only one of defaultRouteAction or defaultUrlRedirect must be set. + DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` + // DefaultService: The full or partial URL of the defaultService // resource to which traffic is directed if none of the hostRules match. // If defaultRouteAction is additionally specified, advanced routing @@ -29407,6 +34985,12 @@ type UrlMap struct { // defaultRouteAction.weightedBackendService must be set. DefaultService string `json:"defaultService,omitempty"` + // DefaultUrlRedirect: When none of the specified hostRules match, the + // request is redirected to a URL specified by defaultUrlRedirect. + // If defaultUrlRedirect is specified, defaultService or + // defaultRouteAction must not be set. + DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` @@ -29421,6 +35005,12 @@ type UrlMap struct { // UrlMap. Fingerprint string `json:"fingerprint,omitempty"` + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. + // The headerAction specified here take effect after headerAction + // specified under pathMatcher. + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + // HostRules: The list of HostRules to use against the URL. HostRules []*HostRule `json:"hostRules,omitempty"` @@ -29444,6 +35034,12 @@ type UrlMap struct { // PathMatchers: The list of named PathMatchers to use against the URL. PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + // Region: [Output Only] URL of the region where the regional URL map + // resides. This field is not applicable to global URL maps. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -29737,6 +35333,293 @@ func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type UrlMapsAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of UrlMapsScopedList resources. + Items map[string]UrlMapsScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. 
Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UrlMapsAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapsAggregatedListWarning: [Output Only] Informational warning +// message. +type UrlMapsAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapsAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsScopedList struct { + // UrlMaps: A list of UrlMaps contained in this scope. + UrlMaps []*UrlMap `json:"urlMaps,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *UrlMapsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UrlMaps") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UrlMaps") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapsScopedListWarning: Informational warning which replaces the +// list of backend services when the list is empty. +type UrlMapsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type UrlMapsValidateRequest struct { // Resource: Content of the UrlMap to be validated. Resource *UrlMap `json:"resource,omitempty"` @@ -29794,6 +35677,44 @@ func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UrlRewrite: The spec for modifying the path before sending the +// request to the matched backend service. +type UrlRewrite struct { + // HostRewrite: Prior to forwarding the request to the selected service, + // the request's host header is replaced with contents of + // hostRewrite. + // The value must be between 1 and 255 characters. + HostRewrite string `json:"hostRewrite,omitempty"` + + // PathPrefixRewrite: Prior to forwarding the request to the selected + // backend service, the matching portion of the request's path is + // replaced by pathPrefixRewrite. + // The value must be between 1 and 1024 characters. + PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HostRewrite") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HostRewrite") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UrlRewrite) MarshalJSON() ([]byte, error) { + type NoMethod UrlRewrite + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // UsableSubnetwork: Subnetwork which the current user has // compute.subnetworks.use permission on. type UsableSubnetwork struct { @@ -30102,12 +36023,23 @@ func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { // VmEndpointNatMappingsInterfaceNatMappings: Contain information of Nat // mapping for an interface of this endpoint. type VmEndpointNatMappingsInterfaceNatMappings struct { + // DrainNatIpPortRanges: List of all drain IP:port-range mappings + // assigned to this interface. These ranges are inclusive, that is, both + // the first and the last ports can be used for NAT. Example: + // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + DrainNatIpPortRanges []string `json:"drainNatIpPortRanges,omitempty"` + // NatIpPortRanges: A list of all IP:port-range mappings assigned to // this interface. These ranges are inclusive, that is, both the first // and the last ports can be used for NAT. Example: // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` + // NumTotalDrainNatPorts: Total number of drain ports across all NAT IPs + // allocated to this interface. It equals to the aggregated port number + // in the field drain_nat_ip_port_ranges. + NumTotalDrainNatPorts int64 `json:"numTotalDrainNatPorts,omitempty"` + // NumTotalNatPorts: Total number of ports across all NAT IPs allocated // to this interface. It equals to the aggregated port number in the // field nat_ip_port_ranges. @@ -30121,15 +36053,16 @@ type VmEndpointNatMappingsInterfaceNatMappings struct { // SourceVirtualIp: Primary IP of the VM for this NIC. SourceVirtualIp string `json:"sourceVirtualIp,omitempty"` - // ForceSendFields is a list of field names (e.g. "NatIpPortRanges") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "DrainNatIpPortRanges") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NatIpPortRanges") to + // NullFields is a list of field names (e.g. "DrainNatIpPortRanges") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -30303,8 +36236,758 @@ func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) -// (== resource_for v1.vpnTunnels ==) +// VpnGateway: Represents a VPN gateway resource. 
+type VpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnGateway, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // VpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this VpnGateway resource. These can be + // later modified by the setLabels method. Each label key/value must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the VPN gateway + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // VpnInterfaces: [Output Only] A list of interfaces on this VPN + // gateway. + VpnInterfaces []*VpnGatewayVpnGatewayInterface `json:"vpnInterfaces,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod VpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnGateway resources. + Items map[string]VpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnGatewayAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayList: Contains a list of VpnGateway resources. +type VpnGatewayList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnGateway resources. + Items []*VpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayListWarning: [Output Only] Informational warning message. +type VpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayStatus struct { + // VpnConnections: List of VPN connection for this VpnGateway. + VpnConnections []*VpnGatewayStatusVpnConnection `json:"vpnConnections,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnConnections") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnConnections") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatus) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusHighAvailabilityRequirementState: Describes the high +// availability requirement state for the VPN connection between this +// Cloud VPN gateway and a peer gateway. +type VpnGatewayStatusHighAvailabilityRequirementState struct { + // State: Indicates the high availability requirement state for the VPN + // connection. Valid values are CONNECTION_REDUNDANCY_MET, + // CONNECTION_REDUNDANCY_NOT_MET. + // + // Possible values: + // "CONNECTION_REDUNDANCY_MET" + // "CONNECTION_REDUNDANCY_NOT_MET" + State string `json:"state,omitempty"` + + // UnsatisfiedReason: Indicates the reason why the VPN connection does + // not meet the high availability redundancy criteria/requirement. Valid + // values is INCOMPLETE_TUNNELS_COVERAGE. + // + // Possible values: + // "INCOMPLETE_TUNNELS_COVERAGE" + UnsatisfiedReason string `json:"unsatisfiedReason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "State") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "State") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusHighAvailabilityRequirementState) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusHighAvailabilityRequirementState + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusTunnel: Contains some information about a VPN tunnel. +type VpnGatewayStatusTunnel struct { + // LocalGatewayInterface: The VPN gateway interface this VPN tunnel is + // associated with. + LocalGatewayInterface int64 `json:"localGatewayInterface,omitempty"` + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN + // gateway or GCP VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. + TunnelUrl string `json:"tunnelUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "LocalGatewayInterface") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LocalGatewayInterface") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusTunnel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusVpnConnection: A VPN connection contains all VPN +// tunnels connected from this VpnGateway to the same peer gateway. The +// peer gateway could either be an external VPN gateway or GCP VPN +// gateway. +type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This + // field is mutually exclusive with peer_gcp_gateway. + PeerExternalGateway string `json:"peerExternalGateway,omitempty"` + + // PeerGcpGateway: URL reference to the peer side VPN gateways to which + // the VPN tunnels in this VPN connection are connected. This field is + // mutually exclusive with peer_external_gateway. + PeerGcpGateway string `json:"peerGcpGateway,omitempty"` + + // State: HighAvailabilityRequirementState for the VPN connection. + State *VpnGatewayStatusHighAvailabilityRequirementState `json:"state,omitempty"` + + // Tunnels: List of VPN tunnels that are in this VPN connection. + Tunnels []*VpnGatewayStatusTunnel `json:"tunnels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PeerExternalGateway") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests.
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PeerExternalGateway") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusVpnConnection) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusVpnConnection + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayVpnGatewayInterface: A VPN gateway interface. +type VpnGatewayVpnGatewayInterface struct { + // Id: The numeric ID of this VPN gateway interface. + Id int64 `json:"id,omitempty"` + + // IpAddress: The external IP address for this VPN gateway interface. + IpAddress string `json:"ipAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayVpnGatewayInterface) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayVpnGatewayInterface + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysGetStatusResponse struct { + Result *VpnGatewayStatus `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
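[Editor's aside, not part of the vendored diff: a sketch of how the VpnGatewaysGetStatusResponse / VpnGatewayStatus types above are typically consumed. It assumes the VpnGateways service in this same package exposes the GetStatus(project, region, vpnGateway) call whose response type is defined here; project, region, and gateway names are placeholders.]

```go
package vpnexample

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// printHAStatus reports, per VPN connection, whether the tunnel set meets the
// HA redundancy requirement (CONNECTION_REDUNDANCY_MET vs. CONNECTION_REDUNDANCY_NOT_MET).
func printHAStatus(svc *compute.Service) error {
	resp, err := svc.VpnGateways.GetStatus("my-project", "us-central1", "gw-a").Do()
	if err != nil {
		return err
	}
	for _, conn := range resp.Result.VpnConnections {
		peer := conn.PeerGcpGateway
		if peer == "" {
			peer = conn.PeerExternalGateway // the two peer fields are mutually exclusive
		}
		log.Printf("peer %s: %s (%d tunnels)", peer, conn.State.State, len(conn.Tunnels))
	}
	return nil
}
```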
+ NullFields []string `json:"-"` +} + +func (s *VpnGatewaysGetStatusResponse) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysGetStatusResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysScopedList struct { + // VpnGateways: [Output Only] A list of VPN gateways contained in this + // scope. + VpnGateways []*VpnGateway `json:"vpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *VpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnGateways") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnGateways") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewaysScopedListWarning: [Output Only] Informational warning +// which replaces the list of addresses when the list is empty. +type VpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnel: Represents a Cloud VPN Tunnel resource. +// +// For more information about VPN, read the the Cloud VPN Overview. (== +// resource_for beta.vpnTunnels ==) (== resource_for v1.vpnTunnels ==) type VpnTunnel struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -30346,6 +37029,26 @@ type VpnTunnel struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // PeerExternalGateway: URL of the peer side external VPN gateway to + // which this VPN tunnel is connected. Provided by the client when the + // VPN tunnel is created. This field is exclusive with the field + // peerGcpGateway. 
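[Editor's aside, not part of the vendored diff: the peerExternalGateway / peerGcpGateway fields being added to VpnTunnel here are mutually exclusive ways of naming the far side of an HA VPN tunnel, paired with vpnGateway / vpnGatewayInterface on the local side. A hypothetical GCP-to-GCP sketch follows; project, region, and resource names are placeholders, and the Insert call is the standard generated one from this package.]

```go
package vpnexample

import compute "google.golang.org/api/compute/v1"

// createHATunnel creates one tunnel of an HA VPN pair between two GCP gateways.
func createHATunnel(svc *compute.Service) (*compute.Operation, error) {
	tunnel := &compute.VpnTunnel{
		Name:         "ha-tunnel-if0",
		IkeVersion:   2,
		SharedSecret: "example-shared-secret",
		Router:       "projects/my-project/regions/us-central1/routers/rtr-a",
		// For HA VPN, vpnGateway (not targetVpnGateway) names the local gateway,
		// plus which of its two interfaces (0 or 1) this tunnel uses.
		VpnGateway:          "projects/my-project/regions/us-central1/vpnGateways/gw-a",
		VpnGatewayInterface: 0,
		// Interface 0 is the zero value, so it must be force-sent to reach the API.
		ForceSendFields: []string{"VpnGatewayInterface"},
		// Exactly one of PeerGcpGateway / PeerExternalGateway may be set; the peer
		// here is another GCP HA VPN gateway, so PeerExternalGateway stays empty.
		PeerGcpGateway: "projects/peer-project/regions/us-central1/vpnGateways/gw-b",
	}
	return svc.VpnTunnels.Insert("my-project", "us-central1", tunnel).Do()
}
```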
+ PeerExternalGateway string `json:"peerExternalGateway,omitempty"` + + // PeerExternalGatewayInterface: The interface ID of the external VPN + // gateway to which this VPN tunnel is connected. Provided by the client + // when the VPN tunnel is created. + PeerExternalGatewayInterface int64 `json:"peerExternalGatewayInterface,omitempty"` + + // PeerGcpGateway: URL of the peer side HA GCP VPN gateway to which this + // VPN tunnel is connected. Provided by the client when the VPN tunnel + // is created. This field can be used when creating highly available VPN + // from VPC network to VPC network, the field is exclusive with the + // field peerExternalGateway. If provided, the VPN tunnel will + // automatically use the same vpnGatewayInterface ID in the peer GCP VPN + // gateway. + PeerGcpGateway string `json:"peerGcpGateway,omitempty"` + // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. PeerIp string `json:"peerIp,omitempty"` @@ -30390,6 +37093,17 @@ type VpnTunnel struct { // // - FAILED: Tunnel creation has failed and the tunnel is not ready to // be used. + // - NO_INCOMING_PACKETS: No incoming packets from peer. + // - REJECTED: Tunnel configuration was rejected, can be result of being + // blacklisted. + // - ALLOCATING_RESOURCES: Cloud VPN is in the process of allocating all + // required resources. + // - STOPPED: Tunnel is stopped due to its Forwarding Rules being + // deleted for Classic VPN tunnels or the project is in frozen state. + // - PEER_IDENTITY_MISMATCH: Peer identity does not match peer IP, + // probably behind NAT. + // - TS_NARROWING_NOT_ALLOWED: Traffic selector narrowing not allowed + // for an HA-VPN tunnel. // // Possible values: // "ALLOCATING_RESOURCES" @@ -30403,6 +37117,7 @@ type VpnTunnel struct { // "NO_INCOMING_PACKETS" // "PROVISIONING" // "REJECTED" + // "STOPPED" // "WAITING_FOR_FULL_CONFIG" Status string `json:"status,omitempty"` @@ -30411,6 +37126,16 @@ type VpnTunnel struct { // created. TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + // VpnGateway: URL of the VPN gateway with which this VPN tunnel is + // associated. Provided by the client when the VPN tunnel is created. + // This must be used (instead of target_vpn_gateway) if a High + // Availability VPN gateway resource is created. + VpnGateway string `json:"vpnGateway,omitempty"` + + // VpnGatewayInterface: The interface ID of the VPN gateway with which + // this VPN tunnel is associated. + VpnGatewayInterface int64 `json:"vpnGatewayInterface,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -30882,6 +37607,58 @@ func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// WeightedBackendService: In contrast to a single BackendService in +// HttpRouteAction to which all matching traffic is directed to, +// WeightedBackendService allows traffic to be split across multiple +// BackendServices. The volume of traffic for each BackendService is +// proportional to the weight specified in each WeightedBackendService +type WeightedBackendService struct { + // BackendService: The full or partial URL to the default BackendService + // resource. Before forwarding the request to backendService, the + // loadbalancer applies any relevant headerActions specified as part of + // this backendServiceWeight. 
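[Editor's aside, not part of the vendored diff: the WeightedBackendService type introduced above splits matching traffic across backend services in proportion to each entry's weight (fraction = weight / sum of weights in the routeAction, per the Weight field documented just below). A sketch of an 80/20 canary split, assuming the HttpRouteAction and HttpHeaderOption types from this same generated package:]

```go
package urlmapexample

import compute "google.golang.org/api/compute/v1"

// canarySplit sends roughly 80% of matching requests to web-v1 and 20% to
// web-v2 (800/1000 and 200/1000), tagging canary traffic with an extra header.
func canarySplit() *compute.HttpRouteAction {
	return &compute.HttpRouteAction{
		WeightedBackendServices: []*compute.WeightedBackendService{
			{
				BackendService: "projects/my-project/global/backendServices/web-v1",
				Weight:         800,
			},
			{
				BackendService: "projects/my-project/global/backendServices/web-v2",
				Weight:         200,
				// Per-backend header rewrite, applied before forwarding to web-v2.
				HeaderAction: &compute.HttpHeaderAction{
					RequestHeadersToAdd: []*compute.HttpHeaderOption{
						{HeaderName: "x-canary", HeaderValue: "true", Replace: true},
					},
				},
			},
		},
	}
}
```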
+ BackendService string `json:"backendService,omitempty"` + + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. + // headerAction specified here take effect before headerAction in the + // enclosing HttpRouteRule, PathMatcher and UrlMap. + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + // Weight: Specifies the fraction of traffic sent to backendService, + // computed as weight / (sum of all weightedBackendService weights in + // routeAction) . + // The selection of a backend service is determined only for new + // traffic. Once a user's request has been directed to a backendService, + // subsequent requests will be sent to the same backendService as + // determined by the BackendService's session affinity policy. + // The value must be between 0 and 1000 + Weight int64 `json:"weight,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *WeightedBackendService) MarshalJSON() ([]byte, error) { + type NoMethod WeightedBackendService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type XpnHostList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. @@ -31073,8 +37850,12 @@ func (s *XpnResourceId) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. (== resource_for beta.zones ==) (== -// resource_for v1.zones ==) Next ID: 17 +// Zone: Represents a Zone resource. +// +// A zone is a deployment area. These deployment areas are subsets of a +// region. For example the zone us-east1-a is located in the us-east1 +// region. For more information, read Regions and Zones. (== +// resource_for beta.zones ==) (== resource_for v1.zones ==) type Zone struct { // AvailableCpuPlatforms: [Output Only] Available cpu/platform // selections for the zone. @@ -31385,6 +38166,7 @@ type AcceleratorTypesAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of accelerator types. 
+// (== suppress_warning http-rest-shadowed ==) func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -31491,6 +38273,7 @@ func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -31552,7 +38335,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of accelerator types.", + // "description": "Retrieves an aggregated list of accelerator types. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.acceleratorTypes.aggregatedList", // "parameterOrder": [ @@ -31637,7 +38420,8 @@ type AcceleratorTypesGetCall struct { header_ http.Header } -// Get: Returns the specified accelerator type. +// Get: Returns the specified accelerator type. (== suppress_warning +// http-rest-shadowed ==) func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -31683,6 +38467,7 @@ func (c *AcceleratorTypesGetCall) Header() http.Header { func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -31746,7 +38531,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } return ret, nil // { - // "description": "Returns the specified accelerator type.", + // "description": "Returns the specified accelerator type. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.acceleratorTypes.get", // "parameterOrder": [ @@ -31803,7 +38588,7 @@ type AcceleratorTypesListCall struct { } // List: Retrieves a list of accelerator types available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -31911,6 +38696,7 @@ func (c *AcceleratorTypesListCall) Header() http.Header { func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -31973,7 +38759,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types available to the specified project.", + // "description": "Retrieves a list of accelerator types available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.acceleratorTypes.list", // "parameterOrder": [ @@ -32064,7 +38850,8 @@ type AddressesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of addresses. 
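[Editor's aside, not part of the vendored diff: the per-call changes repeated through the rest of this hunk (the x-goog-api-client header stamped into every doRequest, the "(== suppress_warning http-rest-shadowed ==)" suffix on every description) do not change how these generated calls are driven. For reference, a sketch of the usual call chain against one of the services touched here; project/region names and the trace header are placeholders, and compute.NewService relies on Application Default Credentials (older releases used compute.New with an authenticated *http.Client).]

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Optional URL parameters, per-call headers, and a context are chained onto
	// the call; Do() runs it once, Pages() iterates every page of results.
	call := svc.Addresses.List("my-project", "us-central1").
		Filter(`status = "RESERVED"`).
		MaxResults(50).
		Context(ctx)
	// Header() exposes the same http.Header that doRequest merges with the
	// generated x-goog-api-client header.
	call.Header().Set("X-Example-Trace", "demo")

	err = call.Pages(ctx, func(page *compute.AddressList) error {
		for _, addr := range page.Items {
			log.Printf("%s\t%s", addr.Name, addr.Address)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```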
+// AggregatedList: Retrieves an aggregated list of addresses. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -32172,6 +38959,7 @@ func (c *AddressesAggregatedListCall) Header() http.Header { func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -32233,7 +39021,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } return ret, nil // { - // "description": "Retrieves an aggregated list of addresses.", + // "description": "Retrieves an aggregated list of addresses. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.addresses.aggregatedList", // "parameterOrder": [ @@ -32317,7 +39105,8 @@ type AddressesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified address resource. +// Delete: Deletes the specified address resource. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -32373,6 +39162,7 @@ func (c *AddressesDeleteCall) Header() http.Header { func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -32433,7 +39223,7 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified address resource.", + // "description": "Deletes the specified address resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.addresses.delete", // "parameterOrder": [ @@ -32494,7 +39284,8 @@ type AddressesGetCall struct { header_ http.Header } -// Get: Returns the specified address resource. +// Get: Returns the specified address resource. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -32541,6 +39332,7 @@ func (c *AddressesGetCall) Header() http.Header { func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -32604,7 +39396,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } return ret, nil // { - // "description": "Returns the specified address resource.", + // "description": "Returns the specified address resource. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.addresses.get", // "parameterOrder": [ @@ -32660,8 +39452,9 @@ type AddressesInsertCall struct { header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. +// Insert: Creates an address resource in the specified project by using +// the data included in the request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -32717,6 +39510,7 @@ func (c *AddressesInsertCall) Header() http.Header { func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -32781,7 +39575,7 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", + // "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.addresses.insert", // "parameterOrder": [ @@ -32837,7 +39631,7 @@ type AddressesListCall struct { } // List: Retrieves a list of addresses contained within the specified -// region. +// region. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list func (r *AddressesService) List(project string, region string) *AddressesListCall { c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -32946,6 +39740,7 @@ func (c *AddressesListCall) Header() http.Header { func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -33008,7 +39803,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro } return ret, nil // { - // "description": "Retrieves a list of addresses contained within the specified region.", + // "description": "Retrieves a list of addresses contained within the specified region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.addresses.list", // "parameterOrder": [ @@ -33099,7 +39894,8 @@ type AutoscalersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of autoscalers. +// AggregatedList: Retrieves an aggregated list of autoscalers. 
(== +// suppress_warning http-rest-shadowed ==) func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -33206,6 +40002,7 @@ func (c *AutoscalersAggregatedListCall) Header() http.Header { func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -33267,7 +40064,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers.", + // "description": "Retrieves an aggregated list of autoscalers. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.autoscalers.aggregatedList", // "parameterOrder": [ @@ -33351,7 +40148,8 @@ type AutoscalersDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified autoscaler. +// Delete: Deletes the specified autoscaler. (== suppress_warning +// http-rest-shadowed ==) func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -33406,6 +40204,7 @@ func (c *AutoscalersDeleteCall) Header() http.Header { func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -33466,7 +40265,7 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler.", + // "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.autoscalers.delete", // "parameterOrder": [ @@ -33528,7 +40327,8 @@ type AutoscalersGetCall struct { } // Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. +// available autoscalers by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -33574,6 +40374,7 @@ func (c *AutoscalersGetCall) Header() http.Header { func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -33637,7 +40438,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.autoscalers.get", // "parameterOrder": [ @@ -33694,7 +40495,7 @@ type AutoscalersInsertCall struct { } // Insert: Creates an autoscaler in the specified project using the data -// included in the request. 
+// included in the request. (== suppress_warning http-rest-shadowed ==) func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -33749,6 +40550,7 @@ func (c *AutoscalersInsertCall) Header() http.Header { func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -33813,7 +40615,7 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.autoscalers.insert", // "parameterOrder": [ @@ -33869,7 +40671,7 @@ type AutoscalersListCall struct { } // List: Retrieves a list of autoscalers contained within the specified -// zone. +// zone. (== suppress_warning http-rest-shadowed ==) func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -33977,6 +40779,7 @@ func (c *AutoscalersListCall) Header() http.Header { func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -34039,7 +40842,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "description": "Retrieves a list of autoscalers contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.autoscalers.list", // "parameterOrder": [ @@ -34133,7 +40936,8 @@ type AutoscalersPatchCall struct { // Patch: Updates an autoscaler in the specified project using the data // included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +// uses the JSON merge patch format and processing rules. (== +// suppress_warning http-rest-shadowed ==) func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -34195,6 +40999,7 @@ func (c *AutoscalersPatchCall) Header() http.Header { func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -34259,7 +41064,7 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.autoscalers.patch", // "parameterOrder": [ @@ -34321,7 +41126,7 @@ type AutoscalersUpdateCall struct { } // Update: Updates an autoscaler in the specified project using the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -34383,6 +41188,7 @@ func (c *AutoscalersUpdateCall) Header() http.Header { func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -34447,7 +41253,7 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "description": "Updates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.autoscalers.update", // "parameterOrder": [ @@ -34509,7 +41315,7 @@ type BackendBucketsAddSignedUrlKeyCall struct { } // AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend bucket. +// for this backend bucket. (== suppress_warning http-rest-shadowed ==) func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -34564,6 +41370,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -34628,7 +41435,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", + // "description": "Adds a key for validating requests with signed URLs for this backend bucket. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendBuckets.addSignedUrlKey", // "parameterOrder": [ @@ -34681,7 +41488,8 @@ type BackendBucketsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. +// Delete: Deletes the specified BackendBucket resource. 
(== +// suppress_warning http-rest-shadowed ==) func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -34735,6 +41543,7 @@ func (c *BackendBucketsDeleteCall) Header() http.Header { func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -34794,7 +41603,7 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource.", + // "description": "Deletes the specified BackendBucket resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.backendBuckets.delete", // "parameterOrder": [ @@ -34846,7 +41655,8 @@ type BackendBucketsDeleteSignedUrlKeyCall struct { } // DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend bucket. +// URLs for this backend bucket. (== suppress_warning http-rest-shadowed +// ==) func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -34901,6 +41711,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -34960,7 +41771,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "description": "Deletes a key for validating requests with signed URLs for this backend bucket. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendBuckets.deleteSignedUrlKey", // "parameterOrder": [ @@ -35019,7 +41830,8 @@ type BackendBucketsGetCall struct { } // Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. +// available backend buckets by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -35064,6 +41876,7 @@ func (c *BackendBucketsGetCall) Header() http.Header { func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -35126,7 +41939,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.backendBuckets.get", // "parameterOrder": [ @@ -35174,7 +41987,8 @@ type BackendBucketsInsertCall struct { } // Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -35228,6 +42042,7 @@ func (c *BackendBucketsInsertCall) Header() http.Header { func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -35291,7 +42106,7 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendBuckets.insert", // "parameterOrder": [ @@ -35338,7 +42153,7 @@ type BackendBucketsListCall struct { } // List: Retrieves the list of BackendBucket resources available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -35445,6 +42260,7 @@ func (c *BackendBucketsListCall) Header() http.Header { func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -35506,7 +42322,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "description": "Retrieves the list of BackendBucket resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.backendBuckets.list", // "parameterOrder": [ @@ -35592,7 +42408,8 @@ type BackendBucketsPatchCall struct { // Patch: Updates the specified BackendBucket resource with the data // included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +// uses the JSON merge patch format and processing rules. 
(== +// suppress_warning http-rest-shadowed ==) func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -35647,6 +42464,7 @@ func (c *BackendBucketsPatchCall) Header() http.Header { func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -35711,7 +42529,7 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.backendBuckets.patch", // "parameterOrder": [ @@ -35767,7 +42585,7 @@ type BackendBucketsUpdateCall struct { } // Update: Updates the specified BackendBucket resource with the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -35822,6 +42640,7 @@ func (c *BackendBucketsUpdateCall) Header() http.Header { func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -35886,7 +42705,7 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "description": "Updates the specified BackendBucket resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.backendBuckets.update", // "parameterOrder": [ @@ -35942,7 +42761,7 @@ type BackendServicesAddSignedUrlKeyCall struct { } // AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend service. +// for this backend service. 
(== suppress_warning http-rest-shadowed ==) func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -35997,6 +42816,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -36061,7 +42881,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend service.", + // "description": "Adds a key for validating requests with signed URLs for this backend service. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendServices.addSignedUrlKey", // "parameterOrder": [ @@ -36115,7 +42935,8 @@ type BackendServicesAggregatedListCall struct { } // AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. +// regional and global, available to the specified project. (== +// suppress_warning http-rest-shadowed ==) func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -36222,6 +43043,7 @@ func (c *BackendServicesAggregatedListCall) Header() http.Header { func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -36283,7 +43105,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.backendServices.aggregatedList", // "parameterOrder": [ @@ -36366,7 +43188,8 @@ type BackendServicesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified BackendService resource. +// Delete: Deletes the specified BackendService resource. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -36421,6 +43244,7 @@ func (c *BackendServicesDeleteCall) Header() http.Header { func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -36480,7 +43304,7 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified BackendService resource.", + // "description": "Deletes the specified BackendService resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.backendServices.delete", // "parameterOrder": [ @@ -36532,7 +43356,8 @@ type BackendServicesDeleteSignedUrlKeyCall struct { } // DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend service. +// URLs for this backend service. (== suppress_warning +// http-rest-shadowed ==) func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -36587,6 +43412,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -36646,7 +43472,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend service.", + // "description": "Deletes a key for validating requests with signed URLs for this backend service. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendServices.deleteSignedUrlKey", // "parameterOrder": [ @@ -36705,7 +43531,8 @@ type BackendServicesGetCall struct { } // Get: Returns the specified BackendService resource. Gets a list of -// available backend services. +// available backend services. (== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -36751,6 +43578,7 @@ func (c *BackendServicesGetCall) Header() http.Header { func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -36813,7 +43641,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "description": "Returns the specified BackendService resource. 
Gets a list of available backend services. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.backendServices.get", // "parameterOrder": [ @@ -36862,7 +43690,7 @@ type BackendServicesGetHealthCall struct { } // GetHealth: Gets the most recent health check results for this -// BackendService. +// BackendService. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -36899,6 +43727,7 @@ func (c *BackendServicesGetHealthCall) Header() http.Header { func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -36963,7 +43792,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Gets the most recent health check results for this BackendService.", + // "description": "Gets the most recent health check results for this BackendService. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendServices.getHealth", // "parameterOrder": [ @@ -37015,7 +43844,8 @@ type BackendServicesInsertCall struct { // Insert: Creates a BackendService resource in the specified project // using the data included in the request. There are several // restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. +// service. Read Restrictions and Guidelines for more information. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -37070,6 +43900,7 @@ func (c *BackendServicesInsertCall) Header() http.Header { func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -37133,7 +43964,7 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendServices.insert", // "parameterOrder": [ @@ -37180,7 +44011,7 @@ type BackendServicesListCall struct { } // List: Retrieves the list of BackendService resources available to the -// specified project. 
+// specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list func (r *BackendServicesService) List(project string) *BackendServicesListCall { c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -37288,6 +44119,7 @@ func (c *BackendServicesListCall) Header() http.Header { func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -37349,7 +44181,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "description": "Retrieves the list of BackendService resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.backendServices.list", // "parameterOrder": [ @@ -37438,7 +44270,7 @@ type BackendServicesPatchCall struct { // guidelines to keep in mind when updating a backend service. Read // Restrictions and Guidelines for more information. This method // supports PATCH semantics and uses the JSON merge patch format and -// processing rules. +// processing rules. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -37494,6 +44326,7 @@ func (c *BackendServicesPatchCall) Header() http.Header { func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -37558,7 +44391,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.backendServices.patch", // "parameterOrder": [ @@ -37614,7 +44447,7 @@ type BackendServicesSetSecurityPolicyCall struct { } // SetSecurityPolicy: Sets the security policy for the specified backend -// service. +// service. 
(== suppress_warning http-rest-shadowed ==) func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -37669,6 +44502,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -37733,7 +44567,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the security policy for the specified backend service.", + // "description": "Sets the security policy for the specified backend service. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.backendServices.setSecurityPolicy", // "parameterOrder": [ @@ -37790,7 +44624,8 @@ type BackendServicesUpdateCall struct { // Update: Updates the specified BackendService resource with the data // included in the request. There are several restrictions and // guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. +// Restrictions and Guidelines for more information. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -37846,6 +44681,7 @@ func (c *BackendServicesUpdateCall) Header() http.Header { func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -37910,7 +44746,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.backendServices.update", // "parameterOrder": [ @@ -37964,7 +44800,8 @@ type DiskTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. +// AggregatedList: Retrieves an aggregated list of disk types. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -38072,6 +44909,7 @@ func (c *DiskTypesAggregatedListCall) Header() http.Header { func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -38133,7 +44971,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types.", + // "description": "Retrieves an aggregated list of disk types. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.diskTypes.aggregatedList", // "parameterOrder": [ @@ -38219,7 +45057,8 @@ type DiskTypesGetCall struct { } // Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. +// types by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -38266,6 +45105,7 @@ func (c *DiskTypesGetCall) Header() http.Header { func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -38329,7 +45169,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", + // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.diskTypes.get", // "parameterOrder": [ @@ -38386,7 +45226,7 @@ type DiskTypesListCall struct { } // List: Retrieves a list of disk types available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -38495,6 +45335,7 @@ func (c *DiskTypesListCall) Header() http.Header { func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -38557,7 +45398,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project.", + // "description": "Retrieves a list of disk types available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.diskTypes.list", // "parameterOrder": [ @@ -38637,6 +45478,195 @@ func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) err } } +// method id "compute.disks.addResourcePolicies": + +type DisksAddResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddResourcePolicies: Adds existing resource policies to a disk. You +// can only add one policy which will be applied to this disk for +// scheduling snapshot creation. (== suppress_warning http-rest-shadowed +// ==) +func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { + c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DisksAddResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.addResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.disks.addResourcePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "request": { + // "$ref": "DisksAddResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.disks.aggregatedList": type DisksAggregatedListCall struct { @@ -38648,7 +45678,8 @@ type DisksAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of persistent disks. +// AggregatedList: Retrieves an aggregated list of persistent disks. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -38756,6 +45787,7 @@ func (c *DisksAggregatedListCall) Header() http.Header { func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -38817,7 +45849,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks.", + // "description": "Retrieves an aggregated list of persistent disks. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.disks.aggregatedList", // "parameterOrder": [ @@ -38903,6 +45935,7 @@ type DisksCreateSnapshotCall struct { } // CreateSnapshot: Creates a snapshot of a specified persistent disk. +// (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -38913,7 +45946,10 @@ func (r *DisksService) CreateSnapshot(project string, zone string, disk string, return c } -// GuestFlush sets the optional parameter "guestFlush": +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Specifies to create an application consistent snapshot by informing +// the OS to prepare for the snapshot process. Currently only supported +// on Windows instances using the Volume Shadow Copy Service (VSS). 
func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c @@ -38965,6 +46001,7 @@ func (c *DisksCreateSnapshotCall) Header() http.Header { func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39030,7 +46067,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk.", + // "description": "Creates a snapshot of a specified persistent disk. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.disks.createSnapshot", // "parameterOrder": [ @@ -39047,6 +46084,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "guestFlush": { + // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", // "location": "query", // "type": "boolean" // }, @@ -39100,7 +46138,8 @@ type DisksDeleteCall struct { // Delete: Deletes the specified persistent disk. Deleting a disk // removes its data permanently and is irreversible. However, deleting a // disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. +// must separately delete snapshots. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -39156,6 +46195,7 @@ func (c *DisksDeleteCall) Header() http.Header { func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39216,7 +46256,7 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.disks.delete", // "parameterOrder": [ @@ -39277,7 +46317,8 @@ type DisksGetCall struct { } // Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. +// persistent disks by making a list() request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -39324,6 +46365,7 @@ func (c *DisksGetCall) Header() http.Header { func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39387,7 +46429,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.disks.get", // "parameterOrder": [ @@ -39445,7 +46487,8 @@ type DisksGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -39491,6 +46534,7 @@ func (c *DisksGetIamPolicyCall) Header() http.Header { func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39554,7 +46598,7 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.disks.getIamPolicy", // "parameterOrder": [ @@ -39614,7 +46658,8 @@ type DisksInsertCall struct { // data in the request. You can create a disk with a sourceImage, a // sourceSnapshot, or create an empty 500 GB data disk by omitting all // properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. +// default size by specifying the sizeGb property. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -39677,6 +46722,7 @@ func (c *DisksInsertCall) Header() http.Header { func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39741,7 +46787,7 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. 
You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.disks.insert", // "parameterOrder": [ @@ -39802,7 +46848,7 @@ type DisksListCall struct { } // List: Retrieves a list of persistent disks contained within the -// specified zone. +// specified zone. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list func (r *DisksService) List(project string, zone string) *DisksListCall { c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -39911,6 +46957,7 @@ func (c *DisksListCall) Header() http.Header { func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -39973,7 +47020,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "description": "Retrieves a list of persistent disks contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.disks.list", // "parameterOrder": [ @@ -40053,6 +47100,193 @@ func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) erro } } +// method id "compute.disks.removeResourcePolicies": + +type DisksRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveResourcePolicies: Removes resource policies from a disk. (== +// suppress_warning http-rest-shadowed ==) +func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { + c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.removeResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes resource policies from a disk. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.disks.removeResourcePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", + // "request": { + // "$ref": "DisksRemoveResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.disks.resize": type DisksResizeCall struct { @@ -40067,7 +47301,7 @@ type DisksResizeCall struct { } // Resize: Resizes the specified persistent disk. You can only increase -// the size of the disk. +// the size of the disk. (== suppress_warning http-rest-shadowed ==) func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -40123,6 +47357,7 @@ func (c *DisksResizeCall) Header() http.Header { func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -40188,7 +47423,7 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.disks.resize", // "parameterOrder": [ @@ -40253,7 +47488,8 @@ type DisksSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. 
(== suppress_warning +// http-rest-shadowed ==) func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -40290,6 +47526,7 @@ func (c *DisksSetIamPolicyCall) Header() http.Header { func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -40355,7 +47592,7 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.disks.setIamPolicy", // "parameterOrder": [ @@ -40415,7 +47652,8 @@ type DisksSetLabelsCall struct { } // SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. +// read the Labeling Resources documentation. (== suppress_warning +// http-rest-shadowed ==) func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -40471,6 +47709,7 @@ func (c *DisksSetLabelsCall) Header() http.Header { func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -40536,7 +47775,7 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.disks.setLabels", // "parameterOrder": [ @@ -40601,7 +47840,7 @@ type DisksTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. (== suppress_warning http-rest-shadowed ==) func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -40638,6 +47877,7 @@ func (c *DisksTestIamPermissionsCall) Header() http.Header { func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -40703,7 +47943,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.disks.testIamPermissions", // "parameterOrder": [ @@ -40750,6 +47990,1055 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } +// method id "compute.externalVpnGateways.delete": + +type ExternalVpnGatewaysDeleteCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified externalVpnGateway. (== +// suppress_warning http-rest-shadowed ==) +func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { + c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalVpnGateway = externalVpnGateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysDeleteCall) Context(ctx context.Context) *ExternalVpnGatewaysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified externalVpnGateway. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.externalVpnGateways.delete", + // "parameterOrder": [ + // "project", + // "externalVpnGateway" + // ], + // "parameters": { + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateways to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.get": + +type ExternalVpnGatewaysGetCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified externalVpnGateway. Get a list of +// available externalVpnGateways by making a list() request. (== +// suppress_warning http-rest-shadowed ==) +func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { + c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalVpnGateway = externalVpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysGetCall) Context(ctx context.Context) *ExternalVpnGatewaysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.get" call. +// Exactly one of *ExternalVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGateway, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ExternalVpnGateway{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.externalVpnGateways.get", + // "parameterOrder": [ + // "project", + // "externalVpnGateway" + // ], + // "parameters": { + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + // "response": { + // "$ref": "ExternalVpnGateway" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.externalVpnGateways.insert": + +type ExternalVpnGatewaysInsertCall struct { + s *Service + project string + externalvpngateway *ExternalVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a ExternalVpnGateway in the specified project using +// the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) +func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { + c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalvpngateway = externalvpngateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpnGatewaysInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysInsertCall) Context(ctx context.Context) *ExternalVpnGatewaysInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.externalvpngateway) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways", + // "request": { + // "$ref": "ExternalVpnGateway" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.list": + +type ExternalVpnGatewaysListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of ExternalVpnGateway available to the +// specified project. (== suppress_warning http-rest-shadowed ==) +func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { + c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
+// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnGatewaysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewaysListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGatewaysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysListCall) Context(ctx context.Context) *ExternalVpnGatewaysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.list" call. +// Exactly one of *ExternalVpnGatewayList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGatewayList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGatewayList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ExternalVpnGatewayList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of ExternalVpnGateway available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.externalVpnGateways.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways", + // "response": { + // "$ref": "ExternalVpnGatewayList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*ExternalVpnGatewayList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.externalVpnGateways.setLabels": + +type ExternalVpnGatewaysSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an ExternalVpnGateway. To learn more +// about labels, read the Labeling Resources documentation. (== +// suppress_warning http-rest-shadowed ==) +func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { + c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysSetLabelsCall) Context(ctx context.Context) *ExternalVpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.testIamPermissions": + +type ExternalVpnGatewaysTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. (== suppress_warning http-rest-shadowed ==) +func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { + c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ExternalVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *ExternalVpnGatewaysTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.firewalls.delete": type FirewallsDeleteCall struct { @@ -40761,7 +49050,8 @@ type FirewallsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified firewall. +// Delete: Deletes the specified firewall. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -40816,6 +49106,7 @@ func (c *FirewallsDeleteCall) Header() http.Header { func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -40875,7 +49166,7 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified firewall.", + // "description": "Deletes the specified firewall. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.firewalls.delete", // "parameterOrder": [ @@ -40927,7 +49218,8 @@ type FirewallsGetCall struct { header_ http.Header } -// Get: Returns the specified firewall. +// Get: Returns the specified firewall. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -40973,6 +49265,7 @@ func (c *FirewallsGetCall) Header() http.Header { func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -41035,7 +49328,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } return ret, nil // { - // "description": "Returns the specified firewall.", + // "description": "Returns the specified firewall. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.firewalls.get", // "parameterOrder": [ @@ -41083,7 +49376,8 @@ type FirewallsInsertCall struct { } // Insert: Creates a firewall rule in the specified project using the -// data included in the request. +// data included in the request. (== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -41138,6 +49432,7 @@ func (c *FirewallsInsertCall) Header() http.Header { func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -41201,7 +49496,7 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "description": "Creates a firewall rule in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.firewalls.insert", // "parameterOrder": [ @@ -41248,7 +49543,7 @@ type FirewallsListCall struct { } // List: Retrieves the list of firewall rules available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list func (r *FirewallsService) List(project string) *FirewallsListCall { c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -41356,6 +49651,7 @@ func (c *FirewallsListCall) Header() http.Header { func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -41417,7 +49713,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err } return ret, nil // { - // "description": "Retrieves the list of firewall rules available to the specified project.", + // "description": "Retrieves the list of firewall rules available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.firewalls.list", // "parameterOrder": [ @@ -41503,7 +49799,8 @@ type FirewallsPatchCall struct { // Patch: Updates the specified firewall rule with the data included in // the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. +// merge patch format and processing rules. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -41559,6 +49856,7 @@ func (c *FirewallsPatchCall) Header() http.Header { func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -41623,7 +49921,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.firewalls.patch", // "parameterOrder": [ @@ -41679,9 +49977,9 @@ type FirewallsUpdateCall struct { } // Update: Updates the specified firewall rule with the data included in -// the request. The PUT method can only update the following fields of -// firewall rule: allowed, description, sourceRanges, sourceTags, -// targetTags. +// the request. Note that all fields will be updated if using PUT, even +// fields that are not specified. To update individual fields, please +// use PATCH instead. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -41737,6 +50035,7 @@ func (c *FirewallsUpdateCall) Header() http.Header { func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -41801,7 +50100,7 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", + // "description": "Updates the specified firewall rule with the data included in the request. Note that all fields will be updated if using PUT, even fields that are not specified. To update individual fields, please use PATCH instead. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.firewalls.update", // "parameterOrder": [ @@ -41855,7 +50154,8 @@ type ForwardingRulesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of forwarding rules. +// AggregatedList: Retrieves an aggregated list of forwarding rules. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -41963,6 +50263,7 @@ func (c *ForwardingRulesAggregatedListCall) Header() http.Header { func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -42024,7 +50325,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F } return ret, nil // { - // "description": "Retrieves an aggregated list of forwarding rules.", + // "description": "Retrieves an aggregated list of forwarding rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.forwardingRules.aggregatedList", // "parameterOrder": [ @@ -42108,7 +50409,8 @@ type ForwardingRulesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified ForwardingRule resource. +// Delete: Deletes the specified ForwardingRule resource. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -42164,6 +50466,7 @@ func (c *ForwardingRulesDeleteCall) Header() http.Header { func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -42224,7 +50527,7 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified ForwardingRule resource.", + // "description": "Deletes the specified ForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.forwardingRules.delete", // "parameterOrder": [ @@ -42285,7 +50588,8 @@ type ForwardingRulesGetCall struct { header_ http.Header } -// Get: Returns the specified ForwardingRule resource. +// Get: Returns the specified ForwardingRule resource. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -42332,6 +50636,7 @@ func (c *ForwardingRulesGetCall) Header() http.Header { func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -42395,7 +50700,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } return ret, nil // { - // "description": "Returns the specified ForwardingRule resource.", + // "description": "Returns the specified ForwardingRule resource. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.forwardingRules.get", // "parameterOrder": [ @@ -42452,7 +50757,8 @@ type ForwardingRulesInsertCall struct { } // Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. +// and region using the data included in the request. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -42508,6 +50814,7 @@ func (c *ForwardingRulesInsertCall) Header() http.Header { func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -42572,7 +50879,7 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.forwardingRules.insert", // "parameterOrder": [ @@ -42628,7 +50935,8 @@ type ForwardingRulesListCall struct { } // List: Retrieves a list of ForwardingRule resources available to the -// specified project and region. +// specified project and region. (== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -42737,6 +51045,7 @@ func (c *ForwardingRulesListCall) Header() http.Header { func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -42799,7 +51108,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } return ret, nil // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.forwardingRules.list", // "parameterOrder": [ @@ -42893,7 +51202,8 @@ type ForwardingRulesSetTargetCall struct { } // SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. +// should be of the same type as the old target. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -42950,6 +51260,7 @@ func (c *ForwardingRulesSetTargetCall) Header() http.Header { func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43015,7 +51326,7 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.forwardingRules.setTarget", // "parameterOrder": [ @@ -43077,7 +51388,8 @@ type GlobalAddressesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified address resource. +// Delete: Deletes the specified address resource. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -43132,6 +51444,7 @@ func (c *GlobalAddressesDeleteCall) Header() http.Header { func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43191,7 +51504,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified address resource.", + // "description": "Deletes the specified address resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.globalAddresses.delete", // "parameterOrder": [ @@ -43244,7 +51557,8 @@ type GlobalAddressesGetCall struct { } // Get: Returns the specified address resource. Gets a list of available -// addresses by making a list() request. +// addresses by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -43290,6 +51604,7 @@ func (c *GlobalAddressesGetCall) Header() http.Header { func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43352,7 +51667,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. 
Gets a list of available addresses by making a list() request.", + // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.globalAddresses.get", // "parameterOrder": [ @@ -43399,8 +51714,9 @@ type GlobalAddressesInsertCall struct { header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. +// Insert: Creates an address resource in the specified project by using +// the data included in the request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -43455,6 +51771,7 @@ func (c *GlobalAddressesInsertCall) Header() http.Header { func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43518,7 +51835,7 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", + // "description": "Creates an address resource in the specified project by using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.globalAddresses.insert", // "parameterOrder": [ @@ -43564,7 +51881,8 @@ type GlobalAddressesListCall struct { header_ http.Header } -// List: Retrieves a list of global addresses. +// List: Retrieves a list of global addresses. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -43672,6 +51990,7 @@ func (c *GlobalAddressesListCall) Header() http.Header { func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43733,7 +52052,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } return ret, nil // { - // "description": "Retrieves a list of global addresses.", + // "description": "Retrieves a list of global addresses. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.globalAddresses.list", // "parameterOrder": [ @@ -43816,7 +52135,8 @@ type GlobalForwardingRulesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. +// Delete: Deletes the specified GlobalForwardingRule resource. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -43871,6 +52191,7 @@ func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43930,7 +52251,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified GlobalForwardingRule resource.", + // "description": "Deletes the specified GlobalForwardingRule resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.globalForwardingRules.delete", // "parameterOrder": [ @@ -43983,7 +52304,8 @@ type GlobalForwardingRulesGetCall struct { } // Get: Returns the specified GlobalForwardingRule resource. Gets a list -// of available forwarding rules by making a list() request. +// of available forwarding rules by making a list() request. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -44029,6 +52351,7 @@ func (c *GlobalForwardingRulesGetCall) Header() http.Header { func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44091,7 +52414,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } return ret, nil // { - // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", + // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.globalForwardingRules.get", // "parameterOrder": [ @@ -44139,7 +52462,8 @@ type GlobalForwardingRulesInsertCall struct { } // Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. +// project using the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -44194,6 +52518,7 @@ func (c *GlobalForwardingRulesInsertCall) Header() http.Header { func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44257,7 +52582,7 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.globalForwardingRules.insert", // "parameterOrder": [ @@ -44304,7 +52629,7 @@ type GlobalForwardingRulesListCall struct { } // List: Retrieves a list of GlobalForwardingRule resources available to -// the specified project. +// the specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -44412,6 +52737,7 @@ func (c *GlobalForwardingRulesListCall) Header() http.Header { func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44473,7 +52799,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa } return ret, nil // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.globalForwardingRules.list", // "parameterOrder": [ @@ -44558,7 +52884,8 @@ type GlobalForwardingRulesSetTargetCall struct { } // SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. +// The new target should be of the same type as the old target. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -44614,6 +52941,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44678,7 +53006,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.globalForwardingRules.setTarget", // "parameterOrder": [ @@ -44732,7 +53060,8 @@ type GlobalOperationsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. +// AggregatedList: Retrieves an aggregated list of all operations. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -44840,6 +53169,7 @@ func (c *GlobalOperationsAggregatedListCall) Header() http.Header { func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44901,7 +53231,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of all operations.", + // "description": "Retrieves an aggregated list of all operations. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.globalOperations.aggregatedList", // "parameterOrder": [ @@ -44984,7 +53314,8 @@ type GlobalOperationsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified Operations resource. +// Delete: Deletes the specified Operations resource. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -45020,6 +53351,7 @@ func (c *GlobalOperationsDeleteCall) Header() http.Header { func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45054,7 +53386,7 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Deletes the specified Operations resource.", + // "description": "Deletes the specified Operations resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.globalOperations.delete", // "parameterOrder": [ @@ -45099,7 +53431,8 @@ type GlobalOperationsGetCall struct { } // Get: Retrieves the specified Operations resource. Gets a list of -// operations by making a list() request. +// operations by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -45145,6 +53478,7 @@ func (c *GlobalOperationsGetCall) Header() http.Header { func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45207,7 +53541,7 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", + // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.globalOperations.get", // "parameterOrder": [ @@ -45255,7 +53589,7 @@ type GlobalOperationsListCall struct { } // List: Retrieves a list of Operation resources contained within the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -45363,6 +53697,7 @@ func (c *GlobalOperationsListCall) Header() http.Header { func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45424,7 +53759,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", + // "description": "Retrieves a list of Operation resources contained within the specified project. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.globalOperations.list", // "parameterOrder": [ @@ -45496,6 +53831,260 @@ func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } +// method id "compute.healthChecks.aggregatedList": + +type HealthChecksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all HealthCheck resources, +// regional and global, available to the specified project. (== +// suppress_warning http-rest-shadowed ==) +func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { + c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/healthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.aggregatedList" call. +// Exactly one of *HealthChecksAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *HealthChecksAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HealthChecksAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.healthChecks.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/healthChecks", + // "response": { + // "$ref": "HealthChecksAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.healthChecks.delete": type HealthChecksDeleteCall struct { @@ -45507,7 +54096,8 @@ type HealthChecksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. +// Delete: Deletes the specified HealthCheck resource. (== +// suppress_warning http-rest-shadowed ==) func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -45561,6 +54151,7 @@ func (c *HealthChecksDeleteCall) Header() http.Header { func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45620,7 +54211,7 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", + // "description": "Deletes the specified HealthCheck resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.healthChecks.delete", // "parameterOrder": [ @@ -45673,7 +54264,8 @@ type HealthChecksGetCall struct { } // Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. +// available health checks by making a list() request. 
(== +// suppress_warning http-rest-shadowed ==) func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -45718,6 +54310,7 @@ func (c *HealthChecksGetCall) Header() http.Header { func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45780,7 +54373,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.healthChecks.get", // "parameterOrder": [ @@ -45828,7 +54421,8 @@ type HealthChecksInsertCall struct { } // Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. +// the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -45882,6 +54476,7 @@ func (c *HealthChecksInsertCall) Header() http.Header { func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45945,7 +54540,7 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.healthChecks.insert", // "parameterOrder": [ @@ -45992,7 +54587,7 @@ type HealthChecksListCall struct { } // List: Retrieves the list of HealthCheck resources available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *HealthChecksService) List(project string) *HealthChecksListCall { c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -46099,6 +54694,7 @@ func (c *HealthChecksListCall) Header() http.Header { func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46160,7 +54756,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.healthChecks.list", // "parameterOrder": [ @@ -46247,6 +54843,7 @@ type HealthChecksPatchCall struct { // Patch: Updates a HealthCheck resource in the specified project using // the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. +// (== suppress_warning http-rest-shadowed ==) func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -46301,6 +54898,7 @@ func (c *HealthChecksPatchCall) Header() http.Header { func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46365,7 +54963,7 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.healthChecks.patch", // "parameterOrder": [ @@ -46421,7 +55019,8 @@ type HealthChecksUpdateCall struct { } // Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. +// the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -46476,6 +55075,7 @@ func (c *HealthChecksUpdateCall) Header() http.Header { func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46540,7 +55140,7 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.healthChecks.update", // "parameterOrder": [ @@ -46594,7 +55194,8 @@ type HttpHealthChecksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. +// Delete: Deletes the specified HttpHealthCheck resource. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -46649,6 +55250,7 @@ func (c *HttpHealthChecksDeleteCall) Header() http.Header { func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46708,7 +55310,7 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified HttpHealthCheck resource.", + // "description": "Deletes the specified HttpHealthCheck resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ @@ -46761,7 +55363,8 @@ type HttpHealthChecksGetCall struct { } // Get: Returns the specified HttpHealthCheck resource. Gets a list of -// available HTTP health checks by making a list() request. +// available HTTP health checks by making a list() request. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -46807,6 +55410,7 @@ func (c *HttpHealthChecksGetCall) Header() http.Header { func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46869,7 +55473,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ @@ -46917,7 +55521,8 @@ type HttpHealthChecksInsertCall struct { } // Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. +// using the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -46972,6 +55577,7 @@ func (c *HttpHealthChecksInsertCall) Header() http.Header { func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47035,7 +55641,7 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ @@ -47082,7 +55688,7 @@ type HttpHealthChecksListCall struct { } // List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. +// the specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -47190,6 +55796,7 @@ func (c *HttpHealthChecksListCall) Header() http.Header { func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47251,7 +55858,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ @@ -47338,6 +55945,7 @@ type HttpHealthChecksPatchCall struct { // Patch: Updates a HttpHealthCheck resource in the specified project // using the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. 
+// (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -47393,6 +56001,7 @@ func (c *HttpHealthChecksPatchCall) Header() http.Header { func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47457,7 +56066,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ @@ -47513,7 +56122,8 @@ type HttpHealthChecksUpdateCall struct { } // Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -47569,6 +56179,7 @@ func (c *HttpHealthChecksUpdateCall) Header() http.Header { func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47633,7 +56244,7 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ @@ -47687,7 +56298,8 @@ type HttpsHealthChecksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. +// Delete: Deletes the specified HttpsHealthCheck resource. 
(== +// suppress_warning http-rest-shadowed ==) func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -47741,6 +56353,7 @@ func (c *HttpsHealthChecksDeleteCall) Header() http.Header { func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47800,7 +56413,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", + // "description": "Deletes the specified HttpsHealthCheck resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ @@ -47853,7 +56466,8 @@ type HttpsHealthChecksGetCall struct { } // Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. +// available HTTPS health checks by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -47898,6 +56512,7 @@ func (c *HttpsHealthChecksGetCall) Header() http.Header { func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47960,7 +56575,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ @@ -48008,7 +56623,8 @@ type HttpsHealthChecksInsertCall struct { } // Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. +// using the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48062,6 +56678,7 @@ func (c *HttpsHealthChecksInsertCall) Header() http.Header { func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48125,7 +56742,7 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ @@ -48172,7 +56789,7 @@ type HttpsHealthChecksListCall struct { } // List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. +// the specified project. (== suppress_warning http-rest-shadowed ==) func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48279,6 +56896,7 @@ func (c *HttpsHealthChecksListCall) Header() http.Header { func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48340,7 +56958,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ @@ -48427,6 +57045,7 @@ type HttpsHealthChecksPatchCall struct { // Patch: Updates a HttpsHealthCheck resource in the specified project // using the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. +// (== suppress_warning http-rest-shadowed ==) func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48481,6 +57100,7 @@ func (c *HttpsHealthChecksPatchCall) Header() http.Header { func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48545,7 +57165,7 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ @@ -48601,7 +57221,8 @@ type HttpsHealthChecksUpdateCall struct { } // Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -48656,6 +57277,7 @@ func (c *HttpsHealthChecksUpdateCall) Header() http.Header { func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48720,7 +57342,7 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ @@ -48774,7 +57396,8 @@ type ImagesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified image. +// Delete: Deletes the specified image. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -48829,6 +57452,7 @@ func (c *ImagesDeleteCall) Header() http.Header { func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48888,7 +57512,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", + // "description": "Deletes the specified image. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.images.delete", // "parameterOrder": [ @@ -48943,7 +57567,7 @@ type ImagesDeprecateCall struct { // Deprecate: Sets the deprecation status of an image. // // If an empty request body is given, clears the deprecation status -// instead. +// instead. 
(== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -48999,6 +57623,7 @@ func (c *ImagesDeprecateCall) Header() http.Header { func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49063,7 +57688,7 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.images.deprecate", // "parameterOrder": [ @@ -49119,7 +57744,7 @@ type ImagesGetCall struct { } // Get: Returns the specified image. Gets a list of available images by -// making a list() request. +// making a list() request. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/images/get func (r *ImagesService) Get(project string, image string) *ImagesGetCall { c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -49165,6 +57790,7 @@ func (c *ImagesGetCall) Header() http.Header { func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49227,7 +57853,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Gets a list of available images by making a list() request.", + // "description": "Returns the specified image. Gets a list of available images by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.images.get", // "parameterOrder": [ @@ -49276,7 +57902,8 @@ type ImagesGetFromFamilyCall struct { } // GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. +// family and is not deprecated. (== suppress_warning http-rest-shadowed +// ==) func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -49321,6 +57948,7 @@ func (c *ImagesGetFromFamilyCall) Header() http.Header { func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49383,7 +58011,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "description": "Returns the latest image that is part of an image family and is not deprecated. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.images.getFromFamily", // "parameterOrder": [ @@ -49432,7 +58060,8 @@ type ImagesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -49477,6 +58106,7 @@ func (c *ImagesGetIamPolicyCall) Header() http.Header { func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49539,7 +58169,7 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.images.getIamPolicy", // "parameterOrder": [ @@ -49587,7 +58217,7 @@ type ImagesInsertCall struct { } // Insert: Creates an image in the specified project using the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -49649,6 +58279,7 @@ func (c *ImagesInsertCall) Header() http.Header { func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49712,7 +58343,7 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", + // "description": "Creates an image in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.images.insert", // "parameterOrder": [ @@ -49772,7 +58403,7 @@ type ImagesListCall struct { // projects, including publicly-available images, like Debian 8. If you // want to get a list of publicly-available images, use this method to // make a request to the respective image project, such as debian-cloud -// or windows-cloud. +// or windows-cloud. 
(== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/images/list func (r *ImagesService) List(project string) *ImagesListCall { c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -49880,6 +58511,7 @@ func (c *ImagesListCall) Header() http.Header { func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49941,7 +58573,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.images.list", // "parameterOrder": [ @@ -50026,7 +58658,8 @@ type ImagesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -50062,6 +58695,7 @@ func (c *ImagesSetIamPolicyCall) Header() http.Header { func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50126,7 +58760,7 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.images.setIamPolicy", // "parameterOrder": [ @@ -50177,7 +58811,8 @@ type ImagesSetLabelsCall struct { } // SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. +// read the Labeling Resources documentation. 
(== suppress_warning +// http-rest-shadowed ==) func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -50213,6 +58848,7 @@ func (c *ImagesSetLabelsCall) Header() http.Header { func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50277,7 +58913,7 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.images.setLabels", // "parameterOrder": [ @@ -50328,7 +58964,7 @@ type ImagesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. (== suppress_warning http-rest-shadowed ==) func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -50364,6 +59000,7 @@ func (c *ImagesTestIamPermissionsCall) Header() http.Header { func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50428,7 +59065,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.images.testIamPermissions", // "parameterOrder": [ @@ -50496,7 +59133,7 @@ type InstanceGroupManagersAbandonInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. +// request. (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -50552,6 +59189,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50617,7 +59255,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Flags the specified instances to be removed from the managed instance group. 
Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ @@ -50678,7 +59316,7 @@ type InstanceGroupManagersAggregatedListCall struct { } // AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. +// groups them by zone. (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -50785,6 +59423,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50847,7 +59486,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "description": "Retrieves the list of managed instance groups and groups them by zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceGroupManagers.aggregatedList", // "parameterOrder": [ @@ -50934,7 +59573,7 @@ type InstanceGroupManagersDeleteCall struct { // Delete: Deletes the specified managed instance group and all of the // instances in that group. Note that the instance group must not belong // to a backend service. Read Deleting an instance group for more -// information. +// information. 
(== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -50989,6 +59628,7 @@ func (c *InstanceGroupManagersDeleteCall) Header() http.Header { func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51049,7 +59689,7 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.instanceGroupManagers.delete", // "parameterOrder": [ @@ -51123,7 +59763,7 @@ type InstanceGroupManagersDeleteInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. +// request. (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -51179,6 +59819,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51244,7 +59885,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. 
This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.deleteInstances", // "parameterOrder": [ @@ -51308,7 +59949,7 @@ type InstanceGroupManagersGetCall struct { // Get: Returns all of the details about the specified managed instance // group. Gets a list of available managed instance groups by making a -// list() request. +// list() request. (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -51354,6 +59995,7 @@ func (c *InstanceGroupManagersGetCall) Header() http.Header { func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51417,7 +60059,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ @@ -51481,6 +60123,7 @@ type InstanceGroupManagersInsertCall struct { // // A managed instance group can have up to 1000 VM instances per group. // Please contact Cloud Support if you need an increase in this limit. +// (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -51535,6 +60178,7 @@ func (c *InstanceGroupManagersInsertCall) Header() http.Header { func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51599,7 +60243,7 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ @@ -51654,7 +60298,8 @@ type InstanceGroupManagersListCall struct { } // List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. +// within the specified project and zone. (== suppress_warning +// http-rest-shadowed ==) func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -51762,6 +60407,7 @@ func (c *InstanceGroupManagersListCall) Header() http.Header { func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51824,7 +60470,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ @@ -51920,7 +60566,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // indicates the action that the managed instance group is performing on // the instance. For example, if the group is still creating an // instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. +// the list displays the errors for that failed action. 
(== +// suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -52019,6 +60666,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52081,7 +60729,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ @@ -52165,7 +60813,7 @@ type InstanceGroupManagersPatchCall struct { // process of being patched. You must separately verify the status of // the individual instances with the listManagedInstances method. This // method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. +// and processing rules. (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -52221,6 +60869,7 @@ func (c *InstanceGroupManagersPatchCall) Header() http.Header { func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52286,7 +60935,7 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates a managed instance group using the information that you specify in the request. 
This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ @@ -52362,7 +61011,7 @@ type InstanceGroupManagersRecreateInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. +// request. (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -52418,6 +61067,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52483,7 +61133,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ @@ -52565,6 +61215,7 @@ type InstanceGroupManagersResizeCall struct { // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining // duration has elapsed before the VM instance is removed or deleted. 
+// (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -52620,6 +61271,7 @@ func (c *InstanceGroupManagersResizeCall) Header() http.Header { func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52680,7 +61332,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ @@ -52749,7 +61401,8 @@ type InstanceGroupManagersSetInstanceTemplateCall struct { // SetInstanceTemplate: Specifies the instance template to use when // creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. +// instances in the group do not change unless you recreate them. 
(== +// suppress_warning http-rest-shadowed ==) func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -52805,6 +61458,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52870,7 +61524,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ @@ -52938,7 +61592,8 @@ type InstanceGroupManagersSetTargetPoolsCall struct { // group. This operation is marked DONE when you make the request even // if the instances have not yet been added to their target pools. The // change might take some time to apply to all of the instances in the -// group depending on the size of the group. +// group depending on the size of the group. (== suppress_warning +// http-rest-shadowed ==) func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -52994,6 +61649,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53059,7 +61715,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. 
The change might take some time to apply to all of the instances in the group depending on the size of the group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ @@ -53123,7 +61779,8 @@ type InstanceGroupsAddInstancesCall struct { // AddInstances: Adds a list of instances to the specified instance // group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. +// network/subnetwork. Read Adding instances for more information. (== +// suppress_warning http-rest-shadowed ==) func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -53179,6 +61836,7 @@ func (c *InstanceGroupsAddInstancesCall) Header() http.Header { func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53244,7 +61902,7 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ @@ -53305,7 +61963,7 @@ type InstanceGroupsAggregatedListCall struct { } // AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. +// by zone. (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -53412,6 +62070,7 @@ func (c *InstanceGroupsAggregatedListCall) Header() http.Header { func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53473,7 +62132,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "description": "Retrieves the list of instance groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ @@ -53560,7 +62219,7 @@ type InstanceGroupsDeleteCall struct { // Delete: Deletes the specified instance group. The instances in the // group are not deleted. Note that instance group must not belong to a // backend service. Read Deleting an instance group for more -// information. +// information. 
(== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -53615,6 +62274,7 @@ func (c *InstanceGroupsDeleteCall) Header() http.Header { func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53675,7 +62335,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.instanceGroups.delete", // "parameterOrder": [ @@ -53735,7 +62395,8 @@ type InstanceGroupsGetCall struct { } // Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. +// instance groups by making a list() request. (== suppress_warning +// http-rest-shadowed ==) func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -53781,6 +62442,7 @@ func (c *InstanceGroupsGetCall) Header() http.Header { func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53844,7 +62506,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceGroups.get", // "parameterOrder": [ @@ -53899,7 +62561,8 @@ type InstanceGroupsInsertCall struct { } // Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. +// parameters that are included in the request. 
(== suppress_warning +// http-rest-shadowed ==) func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -53954,6 +62617,7 @@ func (c *InstanceGroupsInsertCall) Header() http.Header { func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54018,7 +62682,7 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroups.insert", // "parameterOrder": [ @@ -54073,7 +62737,8 @@ type InstanceGroupsListCall struct { } // List: Retrieves the list of instance groups that are located in the -// specified project and zone. +// specified project and zone. (== suppress_warning http-rest-shadowed +// ==) func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54181,6 +62846,7 @@ func (c *InstanceGroupsListCall) Header() http.Header { func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54243,7 +62909,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "description": "Retrieves the list of instance groups that are located in the specified project and zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceGroups.list", // "parameterOrder": [ @@ -54336,6 +63002,7 @@ type InstanceGroupsListInstancesCall struct { } // ListInstances: Lists the instances in the specified instance group. +// (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54435,6 +63102,7 @@ func (c *InstanceGroupsListInstancesCall) Header() http.Header { func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54500,7 +63168,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", + // "description": "Lists the instances in the specified instance group. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ @@ -54607,7 +63275,8 @@ type InstanceGroupsRemoveInstancesCall struct { // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining -// duration before the VM instance is removed or deleted. +// duration before the VM instance is removed or deleted. (== +// suppress_warning http-rest-shadowed ==) func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54663,6 +63332,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54728,7 +63398,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ @@ -54791,6 +63461,7 @@ type InstanceGroupsSetNamedPortsCall struct { } // SetNamedPorts: Sets the named ports for the specified instance group. +// (== suppress_warning http-rest-shadowed ==) func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -54846,6 +63517,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54911,7 +63583,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", + // "description": "Sets the named ports for the specified instance group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ @@ -54974,6 +63646,7 @@ type InstanceTemplatesDeleteCall struct { // Delete: Deletes the specified instance template. Deleting an instance // template is permanent and cannot be undone. 
It is not possible to // delete templates that are already in use by a managed instance group. +// (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -55028,6 +63701,7 @@ func (c *InstanceTemplatesDeleteCall) Header() http.Header { func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55087,7 +63761,7 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", + // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ @@ -55140,7 +63814,8 @@ type InstanceTemplatesGetCall struct { } // Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. +// available instance templates by making a list() request. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -55186,6 +63861,7 @@ func (c *InstanceTemplatesGetCall) Header() http.Header { func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55248,7 +63924,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceTemplates.get", // "parameterOrder": [ @@ -55297,7 +63973,8 @@ type InstanceTemplatesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. 
(== suppress_warning +// http-rest-shadowed ==) func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -55342,6 +64019,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55404,7 +64082,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceTemplates.getIamPolicy", // "parameterOrder": [ @@ -55455,7 +64133,8 @@ type InstanceTemplatesInsertCall struct { // the data that is included in the request. If you are creating a new // template to update an existing instance group, your new instance // template must use the same network or, if applicable, the same -// subnetwork as the original template. +// subnetwork as the original template. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -55510,6 +64189,7 @@ func (c *InstanceTemplatesInsertCall) Header() http.Header { func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55573,7 +64253,7 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceTemplates.insert", // "parameterOrder": [ @@ -55620,7 +64300,8 @@ type InstanceTemplatesListCall struct { } // List: Retrieves a list of instance templates that are contained -// within the specified project. +// within the specified project. 
(== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -55728,6 +64409,7 @@ func (c *InstanceTemplatesListCall) Header() http.Header { func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55789,7 +64471,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project.", + // "description": "Retrieves a list of instance templates that are contained within the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instanceTemplates.list", // "parameterOrder": [ @@ -55874,7 +64556,8 @@ type InstanceTemplatesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -55910,6 +64593,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55974,7 +64658,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceTemplates.setIamPolicy", // "parameterOrder": [ @@ -56025,7 +64709,7 @@ type InstanceTemplatesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. 
(== suppress_warning http-rest-shadowed ==) func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -56061,6 +64745,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56125,7 +64810,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instanceTemplates.testIamPermissions", // "parameterOrder": [ @@ -56178,7 +64863,7 @@ type InstancesAddAccessConfigCall struct { } // AddAccessConfig: Adds an access config to an instance's network -// interface. +// interface. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -56236,6 +64921,7 @@ func (c *InstancesAddAccessConfigCall) Header() http.Header { func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56301,7 +64987,7 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", + // "description": "Adds an access config to an instance's network interface. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ @@ -56371,7 +65057,8 @@ type InstancesAggregatedListCall struct { } // AggregatedList: Retrieves aggregated list of all of the instances in -// your project across all regions and zones. +// your project across all regions and zones. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -56479,6 +65166,7 @@ func (c *InstancesAggregatedListCall) Header() http.Header { func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56540,7 +65228,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", + // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instances.aggregatedList", // "parameterOrder": [ @@ -56628,7 +65316,8 @@ type InstancesAttachDiskCall struct { // AttachDisk: Attaches an existing Disk resource to an instance. You // must first create the disk before you can attach it. It is not // possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. +// information, read Adding a persistent disk to your instance. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -56693,6 +65382,7 @@ func (c *InstancesAttachDiskCall) Header() http.Header { func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56758,7 +65448,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.attachDisk", // "parameterOrder": [ @@ -56827,7 +65517,8 @@ type InstancesDeleteCall struct { } // Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. +// information, see Stopping or Deleting an Instance. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -56883,6 +65574,7 @@ func (c *InstancesDeleteCall) Header() http.Header { func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56943,7 +65635,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.instances.delete", // "parameterOrder": [ @@ -57004,7 +65696,7 @@ type InstancesDeleteAccessConfigCall struct { } // DeleteAccessConfig: Deletes an access config from an instance's -// network interface. +// network interface. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -57062,6 +65754,7 @@ func (c *InstancesDeleteAccessConfigCall) Header() http.Header { func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57122,7 +65815,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", + // "description": "Deletes an access config from an instance's network interface. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.deleteAccessConfig", // "parameterOrder": [ @@ -57196,7 +65889,8 @@ type InstancesDetachDiskCall struct { header_ http.Header } -// DetachDisk: Detaches a disk from an instance. +// DetachDisk: Detaches a disk from an instance. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -57253,6 +65947,7 @@ func (c *InstancesDetachDiskCall) Header() http.Header { func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57313,7 +66008,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", + // "description": "Detaches a disk from an instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.detachDisk", // "parameterOrder": [ @@ -57382,7 +66077,8 @@ type InstancesGetCall struct { } // Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. +// available instances by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -57429,6 +66125,7 @@ func (c *InstancesGetCall) Header() http.Header { func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57492,7 +66189,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", + // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instances.get", // "parameterOrder": [ @@ -57536,6 +66233,198 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } +// method id "compute.instances.getGuestAttributes": + +type InstancesGetGuestAttributesCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetGuestAttributes: Returns the specified guest attributes entry. (== +// suppress_warning http-rest-shadowed ==) +func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { + c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// QueryPath sets the optional parameter "queryPath": Specifies the +// guest attributes path to be queried. +func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("queryPath", queryPath) + return c +} + +// VariableKey sets the optional parameter "variableKey": Specifies the +// key for the guest attributes entry. 
+func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("variableKey", variableKey) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesGetGuestAttributesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.getGuestAttributes" call. +// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *GuestAttributes.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GuestAttributes{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified guest attributes entry. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.instances.getGuestAttributes", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "queryPath": { + // "description": "Specifies the guest attributes path to be queried.", + // "location": "query", + // "type": "string" + // }, + // "variableKey": { + // "description": "Specifies the key for the guest attributes entry.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "response": { + // "$ref": "GuestAttributes" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.instances.getIamPolicy": type InstancesGetIamPolicyCall struct { @@ -57550,7 +66439,8 @@ type InstancesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -57596,6 +66486,7 @@ func (c *InstancesGetIamPolicyCall) Header() http.Header { func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57659,7 +66550,7 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instances.getIamPolicy", // "parameterOrder": [ @@ -57717,7 +66608,7 @@ type InstancesGetSerialPortOutputCall struct { } // GetSerialPortOutput: Returns the last 1 MB of serial port output from -// the specified instance. +// the specified instance. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -57782,6 +66673,7 @@ func (c *InstancesGetSerialPortOutputCall) Header() http.Header { func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57845,7 +66737,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the last 1 MB of serial port output from the specified instance.", + // "description": "Returns the last 1 MB of serial port output from the specified instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ @@ -57918,7 +66810,7 @@ type InstancesGetShieldedInstanceIdentityCall struct { } // GetShieldedInstanceIdentity: Returns the Shielded Instance Identity -// of an instance +// of an instance (== suppress_warning http-rest-shadowed ==) func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -57964,6 +66856,7 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58027,7 +66920,7 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Returns the Shielded Instance Identity of an instance", + // "description": "Returns the Shielded Instance Identity of an instance (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instances.getShieldedInstanceIdentity", // "parameterOrder": [ @@ -58084,7 +66977,8 @@ type InstancesInsertCall struct { } // Insert: Creates an instance resource in the specified project using -// the data included in the request. +// the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -58155,6 +67049,7 @@ func (c *InstancesInsertCall) Header() http.Header { func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58219,7 +67114,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "description": "Creates an instance resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.insert", // "parameterOrder": [ @@ -58280,7 +67175,7 @@ type InstancesListCall struct { } // List: Retrieves the list of instances contained within the specified -// zone. +// zone. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list func (r *InstancesService) List(project string, zone string) *InstancesListCall { c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -58389,6 +67284,7 @@ func (c *InstancesListCall) Header() http.Header { func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58451,7 +67347,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", + // "description": "Retrieves the list of instances contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instances.list", // "parameterOrder": [ @@ -58546,7 +67442,8 @@ type InstancesListReferrersCall struct { // ListReferrers: Retrieves the list of referrers to instances contained // within the specified zone. For more information, read Viewing -// Referrers to VM Instances. +// Referrers to VM Instances. (== suppress_warning http-rest-shadowed +// ==) func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -58655,6 +67552,7 @@ func (c *InstancesListReferrersCall) Header() http.Header { func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58718,7 +67616,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", + // "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.instances.listReferrers", // "parameterOrder": [ @@ -58820,7 +67718,7 @@ type InstancesResetCall struct { // Reset: Performs a reset on the instance. This is a hard reset the VM // does not do a graceful shutdown. For more information, see Resetting -// an instance. +// an instance. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -58876,6 +67774,7 @@ func (c *InstancesResetCall) Header() http.Header { func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58936,7 +67835,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", + // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.reset", // "parameterOrder": [ @@ -58996,7 +67895,8 @@ type InstancesSetDeletionProtectionCall struct { header_ http.Header } -// SetDeletionProtection: Sets deletion protection on the instance. +// SetDeletionProtection: Sets deletion protection on the instance. (== +// suppress_warning http-rest-shadowed ==) func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59058,6 +67958,7 @@ func (c *InstancesSetDeletionProtectionCall) Header() http.Header { func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59118,7 +68019,7 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets deletion protection on the instance.", + // "description": "Sets deletion protection on the instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setDeletionProtection", // "parameterOrder": [ @@ -59185,7 +68086,7 @@ type InstancesSetDiskAutoDeleteCall struct { } // SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. +// an instance. 
(== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -59243,6 +68144,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59303,7 +68205,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "description": "Sets the auto-delete flag for a disk attached to an instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ @@ -59380,7 +68282,8 @@ type InstancesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59417,6 +68320,7 @@ func (c *InstancesSetIamPolicyCall) Header() http.Header { func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59482,7 +68386,7 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setIamPolicy", // "parameterOrder": [ @@ -59542,7 +68446,8 @@ type InstancesSetLabelsCall struct { } // SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. +// read the Labeling Resources documentation. (== suppress_warning +// http-rest-shadowed ==) func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59598,6 +68503,7 @@ func (c *InstancesSetLabelsCall) Header() http.Header { func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59663,7 +68569,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. 
To learn more about labels, read the Labeling Resources documentation.", + // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setLabels", // "parameterOrder": [ @@ -59728,7 +68634,8 @@ type InstancesSetMachineResourcesCall struct { } // SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. +// for a stopped instance to the values specified in the request. (== +// suppress_warning http-rest-shadowed ==) func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59784,6 +68691,7 @@ func (c *InstancesSetMachineResourcesCall) Header() http.Header { func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59849,7 +68757,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setMachineResources", // "parameterOrder": [ @@ -59914,7 +68822,8 @@ type InstancesSetMachineTypeCall struct { } // SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. +// the machine type specified in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59970,6 +68879,7 @@ func (c *InstancesSetMachineTypeCall) Header() http.Header { func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60035,7 +68945,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setMachineType", // "parameterOrder": [ @@ -60100,7 +69010,7 @@ type InstancesSetMetadataCall struct { } // SetMetadata: Sets metadata for the specified instance to the data -// included in the request. +// included in the request. 
(== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -60157,6 +69067,7 @@ func (c *InstancesSetMetadataCall) Header() http.Header { func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60222,7 +69133,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", + // "description": "Sets metadata for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setMetadata", // "parameterOrder": [ @@ -60289,7 +69200,7 @@ type InstancesSetMinCpuPlatformCall struct { // SetMinCpuPlatform: Changes the minimum CPU platform that this // instance should use. This method can only be called on a stopped // instance. For more information, read Specifying a Minimum CPU -// Platform. +// Platform. (== suppress_warning http-rest-shadowed ==) func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -60345,6 +69256,7 @@ func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60410,7 +69322,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ @@ -60474,7 +69386,8 @@ type InstancesSetSchedulingCall struct { header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. +// SetScheduling: Sets an instance's scheduling options. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -60531,6 +69444,7 @@ func (c *InstancesSetSchedulingCall) Header() http.Header { func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60596,7 +69510,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", + // "description": "Sets an instance's scheduling options. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setScheduling", // "parameterOrder": [ @@ -60662,7 +69576,7 @@ type InstancesSetServiceAccountCall struct { // SetServiceAccount: Sets the service account on the instance. For more // information, read Changing the service account and access scopes for -// an instance. +// an instance. (== suppress_warning http-rest-shadowed ==) func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -60718,6 +69632,7 @@ func (c *InstancesSetServiceAccountCall) Header() http.Header { func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60783,7 +69698,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ @@ -60850,7 +69765,8 @@ type InstancesSetShieldedInstanceIntegrityPolicyCall struct { // SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance // integrity policy for an instance. You can only use this method on a // running instance. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. +// JSON merge patch format and processing rules. 
(== suppress_warning +// http-rest-shadowed ==) func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -60906,6 +69822,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60971,7 +69888,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ @@ -61036,7 +69953,7 @@ type InstancesSetTagsCall struct { } // SetTags: Sets network tags for the specified instance to the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -61093,6 +70010,7 @@ func (c *InstancesSetTagsCall) Header() http.Header { func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61158,7 +70076,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request.", + // "description": "Sets network tags for the specified instance to the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.setTags", // "parameterOrder": [ @@ -61222,7 +70140,7 @@ type InstancesSimulateMaintenanceEventCall struct { } // SimulateMaintenanceEvent: Simulates a maintenance event on the -// instance. +// instance. 
(== suppress_warning http-rest-shadowed ==) func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61258,6 +70176,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61318,7 +70237,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Simulates a maintenance event on the instance.", + // "description": "Simulates a maintenance event on the instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.simulateMaintenanceEvent", // "parameterOrder": [ @@ -61374,7 +70293,8 @@ type InstancesStartCall struct { } // Start: Starts an instance that was stopped using the instances().stop -// method. For more information, see Restart an instance. +// method. For more information, see Restart an instance. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -61430,6 +70350,7 @@ func (c *InstancesStartCall) Header() http.Header { func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61490,7 +70411,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.start", // "parameterOrder": [ @@ -61553,7 +70474,7 @@ type InstancesStartWithEncryptionKeyCall struct { // StartWithEncryptionKey: Starts an instance that was stopped using the // instances().stop method. For more information, see Restart an -// instance. +// instance. 
(== suppress_warning http-rest-shadowed ==) func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61609,6 +70530,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61674,7 +70596,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ @@ -61742,7 +70664,8 @@ type InstancesStopCall struct { // incur VM usage charges while they are stopped. However, resources // that the VM is using, such as persistent disks and static IP // addresses, will continue to be charged until they are deleted. For -// more information, see Stopping an instance. +// more information, see Stopping an instance. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -61798,6 +70721,7 @@ func (c *InstancesStopCall) Header() http.Header { func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61858,7 +70782,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.stop", // "parameterOrder": [ @@ -61920,7 +70844,7 @@ type InstancesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. 
(== suppress_warning http-rest-shadowed ==) func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61957,6 +70881,7 @@ func (c *InstancesTestIamPermissionsCall) Header() http.Header { func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62022,7 +70947,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ @@ -62085,7 +71010,8 @@ type InstancesUpdateAccessConfigCall struct { // UpdateAccessConfig: Updates the specified access config from an // instance's network interface with the data included in the request. // This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. +// format and processing rules. (== suppress_warning http-rest-shadowed +// ==) func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62142,6 +71068,7 @@ func (c *InstancesUpdateAccessConfigCall) Header() http.Header { func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62207,7 +71134,7 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.instances.updateAccessConfig", // "parameterOrder": [ @@ -62265,6 +71192,195 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } +// method id "compute.instances.updateDisplayDevice": + +type InstancesUpdateDisplayDeviceCall struct { + s *Service + project string + zone string + instance string + displaydevice *DisplayDevice + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdateDisplayDevice: Updates the Display config for a VM instance. +// You can only use this method on a stopped VM instance. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. 
(== suppress_warning http-rest-shadowed ==) +func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { + c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.displaydevice = displaydevice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.updateDisplayDevice" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateDisplayDevice", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", + // "request": { + // "$ref": "DisplayDevice" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.updateNetworkInterface": type InstancesUpdateNetworkInterfaceCall struct { @@ -62279,7 +71395,8 @@ type InstancesUpdateNetworkInterfaceCall struct { } // UpdateNetworkInterface: Updates an instance's network interface. This -// method follows PATCH semantics. +// method follows PATCH semantics. (== suppress_warning +// http-rest-shadowed ==) func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62336,6 +71453,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62401,7 +71519,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates an instance's network interface. This method follows PATCH semantics.", + // "description": "Updates an instance's network interface. This method follows PATCH semantics. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.instances.updateNetworkInterface", // "parameterOrder": [ @@ -62475,7 +71593,8 @@ type InstancesUpdateShieldedInstanceConfigCall struct { // UpdateShieldedInstanceConfig: Updates the Shielded Instance config // for an instance. You can only use this method on a stopped instance. // This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. +// format and processing rules. 
(== suppress_warning http-rest-shadowed +// ==) func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62531,6 +71650,7 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62596,7 +71716,7 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.instances.updateShieldedInstanceConfig", // "parameterOrder": [ @@ -62659,7 +71779,7 @@ type InterconnectAttachmentsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of interconnect -// attachments. +// attachments. (== suppress_warning http-rest-shadowed ==) func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62766,6 +71886,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62828,7 +71949,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves an aggregated list of interconnect attachments.", + // "description": "Retrieves an aggregated list of interconnect attachments. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ @@ -62912,7 +72033,8 @@ type InterconnectAttachmentsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified interconnect attachment. +// Delete: Deletes the specified interconnect attachment. 
(== +// suppress_warning http-rest-shadowed ==) func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62967,6 +72089,7 @@ func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63027,7 +72150,7 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified interconnect attachment.", + // "description": "Deletes the specified interconnect attachment. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.interconnectAttachments.delete", // "parameterOrder": [ @@ -63088,7 +72211,8 @@ type InterconnectAttachmentsGetCall struct { header_ http.Header } -// Get: Returns the specified interconnect attachment. +// Get: Returns the specified interconnect attachment. (== +// suppress_warning http-rest-shadowed ==) func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63134,6 +72258,7 @@ func (c *InterconnectAttachmentsGetCall) Header() http.Header { func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63197,7 +72322,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } return ret, nil // { - // "description": "Returns the specified interconnect attachment.", + // "description": "Returns the specified interconnect attachment. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ @@ -63254,7 +72379,8 @@ type InterconnectAttachmentsInsertCall struct { } // Insert: Creates an InterconnectAttachment in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63309,6 +72435,7 @@ func (c *InterconnectAttachmentsInsertCall) Header() http.Header { func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63373,7 +72500,7 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ @@ -63429,7 +72556,7 @@ type InterconnectAttachmentsListCall struct { } // List: Retrieves the list of interconnect attachments contained within -// the specified region. +// the specified region. (== suppress_warning http-rest-shadowed ==) func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63537,6 +72664,7 @@ func (c *InterconnectAttachmentsListCall) Header() http.Header { func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63599,7 +72727,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "description": "Retrieves the list of interconnect attachments contained within the specified region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ @@ -63694,7 +72822,8 @@ type InterconnectAttachmentsPatchCall struct { // Patch: Updates the specified interconnect attachment with the data // included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +// uses the JSON merge patch format and processing rules. (== +// suppress_warning http-rest-shadowed ==) func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63750,6 +72879,7 @@ func (c *InterconnectAttachmentsPatchCall) Header() http.Header { func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63815,7 +72945,7 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.interconnectAttachments.patch", // "parameterOrder": [ @@ -63880,7 +73010,7 @@ type InterconnectLocationsGetCall struct { // Get: Returns the details for the specified interconnect location. // Gets a list of available interconnect locations by making a list() -// request. +// request. 
(== suppress_warning http-rest-shadowed ==) func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63925,6 +73055,7 @@ func (c *InterconnectLocationsGetCall) Header() http.Header { func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63987,7 +73118,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } return ret, nil // { - // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnectLocations.get", // "parameterOrder": [ @@ -64035,7 +73166,7 @@ type InterconnectLocationsListCall struct { } // List: Retrieves the list of interconnect locations available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64142,6 +73273,7 @@ func (c *InterconnectLocationsListCall) Header() http.Header { func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64203,7 +73335,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } return ret, nil // { - // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "description": "Retrieves the list of interconnect locations available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnectLocations.list", // "parameterOrder": [ @@ -64286,7 +73418,8 @@ type InterconnectsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified interconnect. +// Delete: Deletes the specified interconnect. (== suppress_warning +// http-rest-shadowed ==) func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64340,6 +73473,7 @@ func (c *InterconnectsDeleteCall) Header() http.Header { func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64399,7 +73533,7 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", + // "description": "Deletes the specified interconnect. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.interconnects.delete", // "parameterOrder": [ @@ -64452,7 +73586,8 @@ type InterconnectsGetCall struct { } // Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. +// interconnects by making a list() request. (== suppress_warning +// http-rest-shadowed ==) func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64497,6 +73632,7 @@ func (c *InterconnectsGetCall) Header() http.Header { func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64559,7 +73695,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnects.get", // "parameterOrder": [ @@ -64608,7 +73744,7 @@ type InterconnectsGetDiagnosticsCall struct { } // GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. +// interconnect. (== suppress_warning http-rest-shadowed ==) func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64653,6 +73789,7 @@ func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64716,7 +73853,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect.", + // "description": "Returns the interconnectDiagnostics for the specified interconnect. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnects.getDiagnostics", // "parameterOrder": [ @@ -64764,7 +73901,8 @@ type InterconnectsInsertCall struct { } // Insert: Creates a Interconnect in the specified project using the -// data included in the request. +// data included in the request. 
(== suppress_warning http-rest-shadowed +// ==) func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -64818,6 +73956,7 @@ func (c *InterconnectsInsertCall) Header() http.Header { func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64881,7 +74020,7 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "description": "Creates a Interconnect in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.interconnects.insert", // "parameterOrder": [ @@ -64928,7 +74067,7 @@ type InterconnectsListCall struct { } // List: Retrieves the list of interconnect available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) func (r *InterconnectsService) List(project string) *InterconnectsListCall { c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -65035,6 +74174,7 @@ func (c *InterconnectsListCall) Header() http.Header { func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65096,7 +74236,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", + // "description": "Retrieves the list of interconnect available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.interconnects.list", // "parameterOrder": [ @@ -65182,7 +74322,8 @@ type InterconnectsPatchCall struct { // Patch: Updates the specified interconnect with the data included in // the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. +// merge patch format and processing rules. (== suppress_warning +// http-rest-shadowed ==) func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -65237,6 +74378,7 @@ func (c *InterconnectsPatchCall) Header() http.Header { func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65301,7 +74443,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.interconnects.patch", // "parameterOrder": [ @@ -65358,6 +74500,7 @@ type LicenseCodesGetCall struct { // Get: Return a specified license code. License codes are mirrored // across all projects that have permissions to read the License Code. +// (== suppress_warning http-rest-shadowed ==) func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -65402,6 +74545,7 @@ func (c *LicenseCodesGetCall) Header() http.Header { func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65464,7 +74608,7 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } return ret, nil // { - // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", + // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.licenseCodes.get", // "parameterOrder": [ @@ -65513,7 +74657,7 @@ type LicenseCodesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. (== suppress_warning http-rest-shadowed ==) func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -65549,6 +74693,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65613,7 +74758,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.licenseCodes.testIamPermissions", // "parameterOrder": [ @@ -65663,7 +74808,8 @@ type LicensesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified license. +// Delete: Deletes the specified license. 
(== suppress_warning +// http-rest-shadowed ==) func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -65717,6 +74863,7 @@ func (c *LicensesDeleteCall) Header() http.Header { func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65776,7 +74923,7 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified license.", + // "description": "Deletes the specified license. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.licenses.delete", // "parameterOrder": [ @@ -65828,7 +74975,8 @@ type LicensesGetCall struct { header_ http.Header } -// Get: Returns the specified License resource. +// Get: Returns the specified License resource. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get func (r *LicensesService) Get(project string, license string) *LicensesGetCall { c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -65874,6 +75022,7 @@ func (c *LicensesGetCall) Header() http.Header { func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65936,7 +75085,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource.", + // "description": "Returns the specified License resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.licenses.get", // "parameterOrder": [ @@ -65985,7 +75134,8 @@ type LicensesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -66030,6 +75180,7 @@ func (c *LicensesGetIamPolicyCall) Header() http.Header { func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66092,7 +75243,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.licenses.getIamPolicy", // "parameterOrder": [ @@ -66139,7 +75290,8 @@ type LicensesInsertCall struct { header_ http.Header } -// Insert: Create a License resource in the specified project. +// Insert: Create a License resource in the specified project. 
(== +// suppress_warning http-rest-shadowed ==) func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -66193,6 +75345,7 @@ func (c *LicensesInsertCall) Header() http.Header { func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66256,7 +75409,7 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Create a License resource in the specified project.", + // "description": "Create a License resource in the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.licenses.insert", // "parameterOrder": [ @@ -66310,7 +75463,8 @@ type LicensesListCall struct { // projects, including licenses attached to publicly-available images, // like Debian 9. If you want to get a list of publicly-available // licenses, use this method to make a request to the respective image -// project, such as debian-cloud or windows-cloud. +// project, such as debian-cloud or windows-cloud. (== suppress_warning +// http-rest-shadowed ==) func (r *LicensesService) List(project string) *LicensesListCall { c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -66417,6 +75571,7 @@ func (c *LicensesListCall) Header() http.Header { func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66478,7 +75633,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } return ret, nil // { - // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.licenses.list", // "parameterOrder": [ @@ -66563,7 +75718,8 @@ type LicensesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. 
(== suppress_warning +// http-rest-shadowed ==) func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -66599,6 +75755,7 @@ func (c *LicensesSetIamPolicyCall) Header() http.Header { func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66663,7 +75820,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.licenses.setIamPolicy", // "parameterOrder": [ @@ -66714,7 +75871,7 @@ type LicensesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. (== suppress_warning http-rest-shadowed ==) func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -66750,6 +75907,7 @@ func (c *LicensesTestIamPermissionsCall) Header() http.Header { func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66814,7 +75972,7 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.licenses.testIamPermissions", // "parameterOrder": [ @@ -66864,7 +76022,8 @@ type MachineTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. +// AggregatedList: Retrieves an aggregated list of machine types. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -66972,6 +76131,7 @@ func (c *MachineTypesAggregatedListCall) Header() http.Header { func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67033,7 +76193,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", + // "description": "Retrieves an aggregated list of machine types. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ @@ -67119,7 +76279,8 @@ type MachineTypesGetCall struct { } // Get: Returns the specified machine type. Gets a list of available -// machine types by making a list() request. +// machine types by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -67166,6 +76327,7 @@ func (c *MachineTypesGetCall) Header() http.Header { func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67229,7 +76391,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.machineTypes.get", // "parameterOrder": [ @@ -67286,7 +76448,7 @@ type MachineTypesListCall struct { } // List: Retrieves a list of machine types available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -67395,6 +76557,7 @@ func (c *MachineTypesListCall) Header() http.Header { func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67457,7 +76620,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project.", + // "description": "Retrieves a list of machine types available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.machineTypes.list", // "parameterOrder": [ @@ -67549,7 +76712,7 @@ type NetworkEndpointGroupsAggregatedListCall struct { } // AggregatedList: Retrieves the list of network endpoint groups and -// sorts them by zone. +// sorts them by zone. 
(== suppress_warning http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -67656,6 +76819,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67718,7 +76882,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + // "description": "Retrieves the list of network endpoint groups and sorts them by zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.networkEndpointGroups.aggregatedList", // "parameterOrder": [ @@ -67804,7 +76968,8 @@ type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { } // AttachNetworkEndpoints: Attach a list of network endpoints to the -// specified network endpoint group. +// specified network endpoint group. (== suppress_warning +// http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -67860,6 +77025,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67925,7 +77091,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "description": "Attach a list of network endpoints to the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ @@ -67989,7 +77155,8 @@ type NetworkEndpointGroupsDeleteCall struct { // Delete: Deletes the specified network endpoint group. The network // endpoints in the NEG and the VM instances they belong to are not // terminated when the NEG is deleted. Note that the NEG cannot be -// deleted if there are backend services referencing it. +// deleted if there are backend services referencing it. 
(== +// suppress_warning http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -68044,6 +77211,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68104,7 +77272,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", + // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.networkEndpointGroups.delete", // "parameterOrder": [ @@ -68164,7 +77332,8 @@ type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { } // DetachNetworkEndpoints: Detach a list of network endpoints from the -// specified network endpoint group. +// specified network endpoint group. (== suppress_warning +// http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -68220,6 +77389,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68285,7 +77455,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Detach a list of network endpoints from the specified network endpoint group.", + // "description": "Detach a list of network endpoints from the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ @@ -68348,7 +77518,8 @@ type NetworkEndpointGroupsGetCall struct { } // Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// available network endpoint groups by making a list() request. 
(== +// suppress_warning http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -68394,6 +77565,7 @@ func (c *NetworkEndpointGroupsGetCall) Header() http.Header { func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68457,7 +77629,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.networkEndpointGroups.get", // "parameterOrder": [ @@ -68512,7 +77684,8 @@ type NetworkEndpointGroupsInsertCall struct { } // Insert: Creates a network endpoint group in the specified project -// using the parameters that are included in the request. +// using the parameters that are included in the request. (== +// suppress_warning http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -68567,6 +77740,7 @@ func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68631,7 +77805,7 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networkEndpointGroups.insert", // "parameterOrder": [ @@ -68686,7 +77860,8 @@ type NetworkEndpointGroupsListCall struct { } // List: Retrieves the list of network endpoint groups that are located -// in the specified project and zone. +// in the specified project and zone. 
(== suppress_warning +// http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -68794,6 +77969,7 @@ func (c *NetworkEndpointGroupsListCall) Header() http.Header { func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68856,7 +78032,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.networkEndpointGroups.list", // "parameterOrder": [ @@ -68949,7 +78125,7 @@ type NetworkEndpointGroupsListNetworkEndpointsCall struct { } // ListNetworkEndpoints: Lists the network endpoints in the specified -// network endpoint group. +// network endpoint group. (== suppress_warning http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69049,6 +78225,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69116,7 +78293,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists the network endpoints in the specified network endpoint group.", + // "description": "Lists the network endpoints in the specified network endpoint group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ @@ -69219,7 +78396,7 @@ type NetworkEndpointGroupsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. 
(== suppress_warning http-rest-shadowed ==) func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69256,6 +78433,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69321,7 +78499,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networkEndpointGroups.testIamPermissions", // "parameterOrder": [ @@ -69380,7 +78558,8 @@ type NetworksAddPeeringCall struct { header_ http.Header } -// AddPeering: Adds a peering to the specified network. +// AddPeering: Adds a peering to the specified network. (== +// suppress_warning http-rest-shadowed ==) func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -69435,6 +78614,7 @@ func (c *NetworksAddPeeringCall) Header() http.Header { func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69499,7 +78679,7 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds a peering to the specified network.", + // "description": "Adds a peering to the specified network. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networks.addPeering", // "parameterOrder": [ @@ -69553,7 +78733,8 @@ type NetworksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified network. +// Delete: Deletes the specified network. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -69608,6 +78789,7 @@ func (c *NetworksDeleteCall) Header() http.Header { func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69667,7 +78849,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network.", + // "description": "Deletes the specified network. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.networks.delete", // "parameterOrder": [ @@ -69720,7 +78902,8 @@ type NetworksGetCall struct { } // Get: Returns the specified network. Gets a list of available networks -// by making a list() request. +// by making a list() request. (== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get func (r *NetworksService) Get(project string, network string) *NetworksGetCall { c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -69766,6 +78949,7 @@ func (c *NetworksGetCall) Header() http.Header { func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69828,7 +79012,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + // "description": "Returns the specified network. Gets a list of available networks by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.networks.get", // "parameterOrder": [ @@ -69876,7 +79060,7 @@ type NetworksInsertCall struct { } // Insert: Creates a network in the specified project using the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -69931,6 +79115,7 @@ func (c *NetworksInsertCall) Header() http.Header { func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69994,7 +79179,7 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", + // "description": "Creates a network in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networks.insert", // "parameterOrder": [ @@ -70041,7 +79226,7 @@ type NetworksListCall struct { } // List: Retrieves the list of networks available to the specified -// project. +// project. 
(== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list func (r *NetworksService) List(project string) *NetworksListCall { c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -70149,6 +79334,7 @@ func (c *NetworksListCall) Header() http.Header { func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70210,7 +79396,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", + // "description": "Retrieves the list of networks available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.networks.list", // "parameterOrder": [ @@ -70296,7 +79482,8 @@ type NetworksPatchCall struct { // Patch: Patches the specified network with the data included in the // request. Only the following fields can be modified: -// routingConfig.routingMode. +// routingConfig.routingMode. (== suppress_warning http-rest-shadowed +// ==) func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70351,6 +79538,7 @@ func (c *NetworksPatchCall) Header() http.Header { func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70415,7 +79603,7 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", + // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.networks.patch", // "parameterOrder": [ @@ -70470,7 +79658,8 @@ type NetworksRemovePeeringCall struct { header_ http.Header } -// RemovePeering: Removes a peering from the specified network. +// RemovePeering: Removes a peering from the specified network. (== +// suppress_warning http-rest-shadowed ==) func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70525,6 +79714,7 @@ func (c *NetworksRemovePeeringCall) Header() http.Header { func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70589,7 +79779,7 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Removes a peering from the specified network.", + // "description": "Removes a peering from the specified network. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networks.removePeering", // "parameterOrder": [ @@ -70644,7 +79834,7 @@ type NetworksSwitchToCustomModeCall struct { } // SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. +// to custom subnet mode. (== suppress_warning http-rest-shadowed ==) func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70698,6 +79888,7 @@ func (c *NetworksSwitchToCustomModeCall) Header() http.Header { func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70757,7 +79948,7 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ @@ -70797,6 +79988,185 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.networks.updatePeering": + +type NetworksUpdatePeeringCall struct { + s *Service + project string + network string + networksupdatepeeringrequest *NetworksUpdatePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdatePeering: Updates the specified network peering with the data +// included in the request Only the following fields can be modified: +// NetworkPeering.export_custom_routes, and +// NetworkPeering.import_custom_routes (== suppress_warning +// http-rest-shadowed ==) +func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { + c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + c.networksupdatepeeringrequest = networksupdatepeeringrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksUpdatePeeringCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.updatePeering" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PATCH", + // "id": "compute.networks.updatePeering", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network resource which the updated peering is belonging to.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}/updatePeering", + // "request": { + // "$ref": "NetworksUpdatePeeringRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.nodeGroups.addNodes": type NodeGroupsAddNodesCall struct { @@ -70810,7 +80180,8 @@ type NodeGroupsAddNodesCall struct { header_ http.Header } -// AddNodes: Adds specified number of nodes to the node group. +// AddNodes: Adds specified number of nodes to the node group. 
(== +// suppress_warning http-rest-shadowed ==) func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70866,6 +80237,7 @@ func (c *NodeGroupsAddNodesCall) Header() http.Header { func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70931,7 +80303,7 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds specified number of nodes to the node group.", + // "description": "Adds specified number of nodes to the node group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeGroups.addNodes", // "parameterOrder": [ @@ -70994,7 +80366,8 @@ type NodeGroupsAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of node groups. Note: -// use nodeGroups.listNodes for more details about each group. +// use nodeGroups.listNodes for more details about each group. (== +// suppress_warning http-rest-shadowed ==) func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71101,6 +80474,7 @@ func (c *NodeGroupsAggregatedListCall) Header() http.Header { func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71162,7 +80536,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr } return ret, nil // { - // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", + // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeGroups.aggregatedList", // "parameterOrder": [ @@ -71246,7 +80620,8 @@ type NodeGroupsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified NodeGroup resource. +// Delete: Deletes the specified NodeGroup resource. (== +// suppress_warning http-rest-shadowed ==) func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71301,6 +80676,7 @@ func (c *NodeGroupsDeleteCall) Header() http.Header { func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71361,7 +80737,7 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Deletes the specified NodeGroup resource.", + // "description": "Deletes the specified NodeGroup resource. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.nodeGroups.delete", // "parameterOrder": [ @@ -71422,7 +80798,8 @@ type NodeGroupsDeleteNodesCall struct { header_ http.Header } -// DeleteNodes: Deletes specified nodes from the node group. +// DeleteNodes: Deletes specified nodes from the node group. (== +// suppress_warning http-rest-shadowed ==) func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71478,6 +80855,7 @@ func (c *NodeGroupsDeleteNodesCall) Header() http.Header { func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71543,7 +80921,7 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes specified nodes from the node group.", + // "description": "Deletes specified nodes from the node group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeGroups.deleteNodes", // "parameterOrder": [ @@ -71553,7 +80931,7 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation // ], // "parameters": { // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "description": "Name of the NodeGroup resource whose nodes will be deleted.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -71609,7 +80987,8 @@ type NodeGroupsGetCall struct { // Get: Returns the specified NodeGroup. Get a list of available // NodeGroups by making a list() request. Note: the "nodes" field should -// not be used. Use nodeGroups.listNodes instead. +// not be used. Use nodeGroups.listNodes instead. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71655,6 +81034,7 @@ func (c *NodeGroupsGetCall) Header() http.Header { func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71718,7 +81098,7 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) } return ret, nil // { - // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", + // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeGroups.get", // "parameterOrder": [ @@ -71776,7 +81156,8 @@ type NodeGroupsGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. 
(== suppress_warning +// http-rest-shadowed ==) func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71822,6 +81203,7 @@ func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71885,7 +81267,7 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeGroups.getIamPolicy", // "parameterOrder": [ @@ -71942,7 +81324,8 @@ type NodeGroupsInsertCall struct { } // Insert: Creates a NodeGroup resource in the specified project using -// the data included in the request. +// the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71998,6 +81381,7 @@ func (c *NodeGroupsInsertCall) Header() http.Header { func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72062,7 +81446,7 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", + // "description": "Creates a NodeGroup resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeGroups.insert", // "parameterOrder": [ @@ -72127,7 +81511,7 @@ type NodeGroupsListCall struct { // List: Retrieves a list of node groups available to the specified // project. Note: use nodeGroups.listNodes for more details about each -// group. +// group. (== suppress_warning http-rest-shadowed ==) func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72235,6 +81619,7 @@ func (c *NodeGroupsListCall) Header() http.Header { func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72297,7 +81682,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e } return ret, nil // { - // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", + // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeGroups.list", // "parameterOrder": [ @@ -72389,7 +81774,8 @@ type NodeGroupsListNodesCall struct { header_ http.Header } -// ListNodes: Lists nodes in the node group. +// ListNodes: Lists nodes in the node group. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72488,6 +81874,7 @@ func (c *NodeGroupsListNodesCall) Header() http.Header { func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72548,7 +81935,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL } return ret, nil // { - // "description": "Lists nodes in the node group.", + // "description": "Lists nodes in the node group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeGroups.listNodes", // "parameterOrder": [ @@ -72650,7 +82037,8 @@ type NodeGroupsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72687,6 +82075,7 @@ func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72752,7 +82141,7 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeGroups.setIamPolicy", // "parameterOrder": [ @@ -72811,7 +82200,8 @@ type NodeGroupsSetNodeTemplateCall struct { header_ http.Header } -// SetNodeTemplate: Updates the node template of the node group. +// SetNodeTemplate: Updates the node template of the node group. 
(== +// suppress_warning http-rest-shadowed ==) func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -72867,6 +82257,7 @@ func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72932,7 +82323,7 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Updates the node template of the node group.", + // "description": "Updates the node template of the node group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeGroups.setNodeTemplate", // "parameterOrder": [ @@ -72997,7 +82388,7 @@ type NodeGroupsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. (== suppress_warning http-rest-shadowed ==) func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73034,6 +82425,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73099,7 +82491,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeGroups.testIamPermissions", // "parameterOrder": [ @@ -73157,7 +82549,8 @@ type NodeTemplatesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node templates. +// AggregatedList: Retrieves an aggregated list of node templates. (== +// suppress_warning http-rest-shadowed ==) func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73264,6 +82657,7 @@ func (c *NodeTemplatesAggregatedListCall) Header() http.Header { func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73325,7 +82719,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod } return ret, nil // { - // "description": "Retrieves an aggregated list of node templates.", + // "description": "Retrieves an aggregated list of node templates. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeTemplates.aggregatedList", // "parameterOrder": [ @@ -73409,7 +82803,8 @@ type NodeTemplatesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified NodeTemplate resource. +// Delete: Deletes the specified NodeTemplate resource. (== +// suppress_warning http-rest-shadowed ==) func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73464,6 +82859,7 @@ func (c *NodeTemplatesDeleteCall) Header() http.Header { func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73524,7 +82920,7 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified NodeTemplate resource.", + // "description": "Deletes the specified NodeTemplate resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.nodeTemplates.delete", // "parameterOrder": [ @@ -73586,7 +82982,8 @@ type NodeTemplatesGetCall struct { } // Get: Returns the specified node template. Gets a list of available -// node templates by making a list() request. +// node templates by making a list() request. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73632,6 +83029,7 @@ func (c *NodeTemplatesGetCall) Header() http.Header { func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73695,7 +83093,7 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } return ret, nil // { - // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeTemplates.get", // "parameterOrder": [ @@ -73753,7 +83151,8 @@ type NodeTemplatesGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. 
(== suppress_warning +// http-rest-shadowed ==) func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73799,6 +83198,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73862,7 +83262,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeTemplates.getIamPolicy", // "parameterOrder": [ @@ -73919,7 +83319,8 @@ type NodeTemplatesInsertCall struct { } // Insert: Creates a NodeTemplate resource in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73974,6 +83375,7 @@ func (c *NodeTemplatesInsertCall) Header() http.Header { func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74038,7 +83440,7 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeTemplates.insert", // "parameterOrder": [ @@ -74094,7 +83496,7 @@ type NodeTemplatesListCall struct { } // List: Retrieves a list of node templates available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74202,6 +83604,7 @@ func (c *NodeTemplatesListCall) Header() http.Header { func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74264,7 +83667,7 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL } return ret, nil // { - // "description": "Retrieves a list of node templates available to the specified project.", + // "description": "Retrieves a list of node templates available to the specified project. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeTemplates.list", // "parameterOrder": [ @@ -74358,7 +83761,8 @@ type NodeTemplatesSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74395,6 +83799,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74460,7 +83865,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeTemplates.setIamPolicy", // "parameterOrder": [ @@ -74520,7 +83925,7 @@ type NodeTemplatesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. (== suppress_warning http-rest-shadowed ==) func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74557,6 +83962,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74622,7 +84028,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.nodeTemplates.testIamPermissions", // "parameterOrder": [ @@ -74680,7 +84086,8 @@ type NodeTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node types. +// AggregatedList: Retrieves an aggregated list of node types. 
(== +// suppress_warning http-rest-shadowed ==) func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74787,6 +84194,7 @@ func (c *NodeTypesAggregatedListCall) Header() http.Header { func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74848,7 +84256,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of node types.", + // "description": "Retrieves an aggregated list of node types. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeTypes.aggregatedList", // "parameterOrder": [ @@ -74934,7 +84342,8 @@ type NodeTypesGetCall struct { } // Get: Returns the specified node type. Gets a list of available node -// types by making a list() request. +// types by making a list() request. (== suppress_warning +// http-rest-shadowed ==) func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -74980,6 +84389,7 @@ func (c *NodeTypesGetCall) Header() http.Header { func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75043,7 +84453,7 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } return ret, nil // { - // "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", + // "description": "Returns the specified node type. Gets a list of available node types by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeTypes.get", // "parameterOrder": [ @@ -75100,7 +84510,7 @@ type NodeTypesListCall struct { } // List: Retrieves a list of node types available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75208,6 +84618,7 @@ func (c *NodeTypesListCall) Header() http.Header { func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75270,7 +84681,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err } return ret, nil // { - // "description": "Retrieves a list of node types available to the specified project.", + // "description": "Retrieves a list of node types available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.nodeTypes.list", // "parameterOrder": [ @@ -75361,6 +84772,7 @@ type ProjectsDisableXpnHostCall struct { } // DisableXpnHost: Disable this project as a shared VPC host project. 
+// (== suppress_warning http-rest-shadowed ==) func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75413,6 +84825,7 @@ func (c *ProjectsDisableXpnHostCall) Header() http.Header { func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75471,7 +84884,7 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project.", + // "description": "Disable this project as a shared VPC host project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.disableXpnHost", // "parameterOrder": [ @@ -75514,8 +84927,9 @@ type ProjectsDisableXpnResourceCall struct { header_ http.Header } -// DisableXpnResource: Disable a serivce resource (a.k.a service -// project) associated with this host project. +// DisableXpnResource: Disable a service resource (also known as service +// project) associated with this host project. (== suppress_warning +// http-rest-shadowed ==) func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75569,6 +84983,7 @@ func (c *ProjectsDisableXpnResourceCall) Header() http.Header { func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75632,7 +85047,7 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "description": "Disable a service resource (also known as service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.disableXpnResource", // "parameterOrder": [ @@ -75677,7 +85092,8 @@ type ProjectsEnableXpnHostCall struct { header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. +// EnableXpnHost: Enable this project as a shared VPC host project. (== +// suppress_warning http-rest-shadowed ==) func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75730,6 +85146,7 @@ func (c *ProjectsEnableXpnHostCall) Header() http.Header { func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75788,7 +85205,7 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project.", + // "description": "Enable this project as a shared VPC host project. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.enableXpnHost", // "parameterOrder": [ @@ -75833,7 +85250,8 @@ type ProjectsEnableXpnResourceCall struct { // EnableXpnResource: Enable service resource (a.k.a service project) // for a host project, so that subnets in the host project can be used -// by instances in the service project. +// by instances in the service project. (== suppress_warning +// http-rest-shadowed ==) func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75887,6 +85305,7 @@ func (c *ProjectsEnableXpnResourceCall) Header() http.Header { func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75950,7 +85369,7 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.enableXpnResource", // "parameterOrder": [ @@ -75996,7 +85415,8 @@ type ProjectsGetCall struct { header_ http.Header } -// Get: Returns the specified Project resource. +// Get: Returns the specified Project resource. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get func (r *ProjectsService) Get(project string) *ProjectsGetCall { c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -76041,6 +85461,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76102,7 +85523,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource.", + // "description": "Returns the specified Project resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.projects.get", // "parameterOrder": [ @@ -76142,7 +85563,8 @@ type ProjectsGetXpnHostCall struct { } // GetXpnHost: Gets the shared VPC host project that this project links -// to. May be empty if no link exists. +// to. May be empty if no link exists. 
(== suppress_warning +// http-rest-shadowed ==) func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76186,6 +85608,7 @@ func (c *ProjectsGetXpnHostCall) Header() http.Header { func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76247,7 +85670,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", + // "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.projects.getXpnHost", // "parameterOrder": [ @@ -76286,7 +85709,8 @@ type ProjectsGetXpnResourcesCall struct { } // GetXpnResources: Gets service resources (a.k.a service project) -// associated with this host project. +// associated with this host project. (== suppress_warning +// http-rest-shadowed ==) func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76393,6 +85817,7 @@ func (c *ProjectsGetXpnResourcesCall) Header() http.Header { func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76454,7 +85879,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Gets service resources (a.k.a service project) associated with this host project.", + // "description": "Gets service resources (a.k.a service project) associated with this host project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.projects.getXpnResources", // "parameterOrder": [ @@ -76537,7 +85962,7 @@ type ProjectsListXpnHostsCall struct { } // ListXpnHosts: Lists all shared VPC host projects visible to the user -// in an organization. +// in an organization. (== suppress_warning http-rest-shadowed ==) func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76635,6 +86060,7 @@ func (c *ProjectsListXpnHostsCall) Header() http.Header { func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76698,7 +86124,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "Lists all shared VPC host projects visible to the user in an organization.", + // "description": "Lists all shared VPC host projects visible to the user in an organization. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.listXpnHosts", // "parameterOrder": [ @@ -76783,7 +86209,8 @@ type ProjectsMoveDiskCall struct { header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. +// MoveDisk: Moves a persistent disk from one zone to another. (== +// suppress_warning http-rest-shadowed ==) func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76837,6 +86264,7 @@ func (c *ProjectsMoveDiskCall) Header() http.Header { func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76900,7 +86328,7 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another.", + // "description": "Moves a persistent disk from one zone to another. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.moveDisk", // "parameterOrder": [ @@ -76947,7 +86375,7 @@ type ProjectsMoveInstanceCall struct { } // MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. +// from one zone to another. (== suppress_warning http-rest-shadowed ==) func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77001,6 +86429,7 @@ func (c *ProjectsMoveInstanceCall) Header() http.Header { func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77064,7 +86493,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", + // "description": "Moves an instance and its attached persistent disks from one zone to another. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.moveInstance", // "parameterOrder": [ @@ -77112,6 +86541,7 @@ type ProjectsSetCommonInstanceMetadataCall struct { // SetCommonInstanceMetadata: Sets metadata common to all instances // within the specified project using the data included in the request. 
+// (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -77166,6 +86596,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77229,7 +86660,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ @@ -77278,7 +86709,7 @@ type ProjectsSetDefaultNetworkTierCall struct { // SetDefaultNetworkTier: Sets the default network tier of the project. // The default network tier is used when an // address/forwardingRule/instance is created without specifying the -// network tier field. +// network tier field. (== suppress_warning http-rest-shadowed ==) func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77332,6 +86763,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77395,7 +86827,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.setDefaultNetworkTier", // "parameterOrder": [ @@ -77444,7 +86876,7 @@ type ProjectsSetUsageExportBucketCall struct { // SetUsageExportBucket: Enables the usage export feature and sets the // usage export bucket where reports are stored. If you provide an empty // request body using this method, the usage export feature will be -// disabled. +// disabled. 
(== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -77499,6 +86931,7 @@ func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77562,7 +86995,7 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ @@ -77612,7 +87045,8 @@ type RegionAutoscalersDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified autoscaler. +// Delete: Deletes the specified autoscaler. (== suppress_warning +// http-rest-shadowed ==) func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77667,6 +87101,7 @@ func (c *RegionAutoscalersDeleteCall) Header() http.Header { func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77727,7 +87162,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified autoscaler.", + // "description": "Deletes the specified autoscaler. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.regionAutoscalers.delete", // "parameterOrder": [ @@ -77788,7 +87223,8 @@ type RegionAutoscalersGetCall struct { header_ http.Header } -// Get: Returns the specified autoscaler. +// Get: Returns the specified autoscaler. (== suppress_warning +// http-rest-shadowed ==) func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77834,6 +87270,7 @@ func (c *RegionAutoscalersGetCall) Header() http.Header { func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77897,7 +87334,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } return ret, nil // { - // "description": "Returns the specified autoscaler.", + // "description": "Returns the specified autoscaler. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ @@ -77954,7 +87391,7 @@ type RegionAutoscalersInsertCall struct { } // Insert: Creates an autoscaler in the specified project using the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78009,6 +87446,7 @@ func (c *RegionAutoscalersInsertCall) Header() http.Header { func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78073,7 +87511,7 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ @@ -78129,7 +87567,7 @@ type RegionAutoscalersListCall struct { } // List: Retrieves a list of autoscalers contained within the specified -// region. +// region. (== suppress_warning http-rest-shadowed ==) func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78237,6 +87675,7 @@ func (c *RegionAutoscalersListCall) Header() http.Header { func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78299,7 +87738,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "description": "Retrieves a list of autoscalers contained within the specified region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ @@ -78393,7 +87832,8 @@ type RegionAutoscalersPatchCall struct { // Patch: Updates an autoscaler in the specified project using the data // included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. +// uses the JSON merge patch format and processing rules. 
(== +// suppress_warning http-rest-shadowed ==) func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78455,6 +87895,7 @@ func (c *RegionAutoscalersPatchCall) Header() http.Header { func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78519,7 +87960,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.regionAutoscalers.patch", // "parameterOrder": [ @@ -78581,7 +88022,7 @@ type RegionAutoscalersUpdateCall struct { } // Update: Updates an autoscaler in the specified project using the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78643,6 +88084,7 @@ func (c *RegionAutoscalersUpdateCall) Header() http.Header { func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78707,7 +88149,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "description": "Updates an autoscaler in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.regionAutoscalers.update", // "parameterOrder": [ @@ -78768,7 +88210,8 @@ type RegionBackendServicesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified regional BackendService resource. +// Delete: Deletes the specified regional BackendService resource. 
(== +// suppress_warning http-rest-shadowed ==) func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78823,6 +88266,7 @@ func (c *RegionBackendServicesDeleteCall) Header() http.Header { func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78883,7 +88327,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified regional BackendService resource.", + // "description": "Deletes the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.regionBackendServices.delete", // "parameterOrder": [ @@ -78944,7 +88388,8 @@ type RegionBackendServicesGetCall struct { header_ http.Header } -// Get: Returns the specified regional BackendService resource. +// Get: Returns the specified regional BackendService resource. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78990,6 +88435,7 @@ func (c *RegionBackendServicesGetCall) Header() http.Header { func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79053,7 +88499,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Returns the specified regional BackendService resource.", + // "description": "Returns the specified regional BackendService resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionBackendServices.get", // "parameterOrder": [ @@ -79111,7 +88557,7 @@ type RegionBackendServicesGetHealthCall struct { } // GetHealth: Gets the most recent health check results for this -// regional BackendService. +// regional BackendService. (== suppress_warning http-rest-shadowed ==) func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79148,6 +88594,7 @@ func (c *RegionBackendServicesGetHealthCall) Header() http.Header { func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79213,7 +88660,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the most recent health check results for this regional BackendService.", + // "description": "Gets the most recent health check results for this regional BackendService. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ @@ -79275,7 +88722,7 @@ type RegionBackendServicesInsertCall struct { // project using the data included in the request. There are several // restrictions and guidelines to keep in mind when creating a regional // backend service. Read Restrictions and Guidelines for more -// information. +// information. (== suppress_warning http-rest-shadowed ==) func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79330,6 +88777,7 @@ func (c *RegionBackendServicesInsertCall) Header() http.Header { func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79394,7 +88842,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ @@ -79450,7 +88898,8 @@ type RegionBackendServicesListCall struct { } // List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. +// available to the specified project in the given region. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79558,6 +89007,7 @@ func (c *RegionBackendServicesListCall) Header() http.Header { func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79620,7 +89070,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe } return ret, nil // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionBackendServices.list", // "parameterOrder": [ @@ -79718,7 +89168,7 @@ type RegionBackendServicesPatchCall struct { // guidelines to keep in mind when updating a backend service. Read // Restrictions and Guidelines for more information. 
This method // supports PATCH semantics and uses the JSON merge patch format and -// processing rules. +// processing rules. (== suppress_warning http-rest-shadowed ==) func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79774,6 +89224,7 @@ func (c *RegionBackendServicesPatchCall) Header() http.Header { func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79839,7 +89290,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ @@ -79906,7 +89357,8 @@ type RegionBackendServicesUpdateCall struct { // Update: Updates the specified regional BackendService resource with // the data included in the request. There are several restrictions and // guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. +// Restrictions and Guidelines for more information. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79962,6 +89414,7 @@ func (c *RegionBackendServicesUpdateCall) Header() http.Header { func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80027,7 +89480,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.regionBackendServices.update", // "parameterOrder": [ @@ -80089,7 +89542,8 @@ type RegionCommitmentsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of commitments. +// AggregatedList: Retrieves an aggregated list of commitments. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -80196,6 +89650,7 @@ func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80257,7 +89712,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of commitments.", + // "description": "Retrieves an aggregated list of commitments. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionCommitments.aggregatedList", // "parameterOrder": [ @@ -80343,7 +89798,8 @@ type RegionCommitmentsGetCall struct { } // Get: Returns the specified commitment resource. Gets a list of -// available commitments by making a list() request. +// available commitments by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -80389,6 +89845,7 @@ func (c *RegionCommitmentsGetCall) Header() http.Header { func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80452,7 +89909,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment } return ret, nil // { - // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionCommitments.get", // "parameterOrder": [ @@ -80509,7 +89966,7 @@ type RegionCommitmentsInsertCall struct { } // Insert: Creates a commitment in the specified project using the data -// included in the request. +// included in the request. 
(== suppress_warning http-rest-shadowed ==) func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -80564,6 +90021,7 @@ func (c *RegionCommitmentsInsertCall) Header() http.Header { func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80628,7 +90086,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a commitment in the specified project using the data included in the request.", + // "description": "Creates a commitment in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionCommitments.insert", // "parameterOrder": [ @@ -80684,7 +90142,7 @@ type RegionCommitmentsListCall struct { } // List: Retrieves a list of commitments contained within the specified -// region. +// region. (== suppress_warning http-rest-shadowed ==) func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -80792,6 +90250,7 @@ func (c *RegionCommitmentsListCall) Header() http.Header { func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80854,7 +90313,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen } return ret, nil // { - // "description": "Retrieves a list of commitments contained within the specified region.", + // "description": "Retrieves a list of commitments contained within the specified region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionCommitments.list", // "parameterOrder": [ @@ -80948,7 +90407,8 @@ type RegionDiskTypesGetCall struct { } // Get: Returns the specified regional disk type. Gets a list of -// available disk types by making a list() request. +// available disk types by making a list() request. (== suppress_warning +// http-rest-shadowed ==) func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -80994,6 +90454,7 @@ func (c *RegionDiskTypesGetCall) Header() http.Header { func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81057,7 +90518,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionDiskTypes.get", // "parameterOrder": [ @@ -81114,7 +90575,7 @@ type RegionDiskTypesListCall struct { } // List: Retrieves a list of regional disk types available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81222,6 +90683,7 @@ func (c *RegionDiskTypesListCall) Header() http.Header { func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81284,7 +90746,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT } return ret, nil // { - // "description": "Retrieves a list of regional disk types available to the specified project.", + // "description": "Retrieves a list of regional disk types available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionDiskTypes.list", // "parameterOrder": [ @@ -81364,6 +90826,195 @@ func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskT } } +// method id "compute.regionDisks.addResourcePolicies": + +type RegionDisksAddResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddResourcePolicies: Adds existing resource policies to a regional +// disk. You can only add one policy which will be applied to this disk +// for scheduling snapshot creation. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { + c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.disk = disk + c.regiondisksaddresourcepoliciesrequest = regiondisksaddresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *RegionDisksAddResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksAddResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDisksAddResourcePoliciesCall) Context(ctx context.Context) *RegionDisksAddResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksaddresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/addResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.addResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionDisks.addResourcePolicies", + // "parameterOrder": [ + // "project", + // "region", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "request": { + // "$ref": "RegionDisksAddResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionDisks.createSnapshot": type RegionDisksCreateSnapshotCall struct { @@ -81377,7 +91028,8 @@ type RegionDisksCreateSnapshotCall struct { header_ http.Header } -// CreateSnapshot: Creates a snapshot of this regional disk. +// CreateSnapshot: Creates a snapshot of this regional disk. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81433,6 +91085,7 @@ func (c *RegionDisksCreateSnapshotCall) Header() http.Header { func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81498,7 +91151,7 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a snapshot of this regional disk.", + // "description": "Creates a snapshot of this regional disk. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionDisks.createSnapshot", // "parameterOrder": [ @@ -81565,6 +91218,7 @@ type RegionDisksDeleteCall struct { // regional disk removes all the replicas of its data permanently and is // irreversible. 
However, deleting a disk does not delete any snapshots // previously made from the disk. You must separately delete snapshots. +// (== suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81619,6 +91273,7 @@ func (c *RegionDisksDeleteCall) Header() http.Header { func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81679,7 +91334,7 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.regionDisks.delete", // "parameterOrder": [ @@ -81739,7 +91394,8 @@ type RegionDisksGetCall struct { header_ http.Header } -// Get: Returns a specified regional persistent disk. +// Get: Returns a specified regional persistent disk. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81785,6 +91441,7 @@ func (c *RegionDisksGetCall) Header() http.Header { func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81848,7 +91505,7 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified regional persistent disk.", + // "description": "Returns a specified regional persistent disk. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionDisks.get", // "parameterOrder": [ @@ -81905,7 +91562,8 @@ type RegionDisksInsertCall struct { } // Insert: Creates a persistent regional disk in the specified project -// using the data included in the request. +// using the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -81967,6 +91625,7 @@ func (c *RegionDisksInsertCall) Header() http.Header { func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82031,7 +91690,7 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + // "description": "Creates a persistent regional disk in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionDisks.insert", // "parameterOrder": [ @@ -82092,7 +91751,7 @@ type RegionDisksListCall struct { } // List: Retrieves the list of persistent disks contained within the -// specified region. +// specified region. (== suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -82200,6 +91859,7 @@ func (c *RegionDisksListCall) Header() http.Header { func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82262,7 +91922,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error } return ret, nil // { - // "description": "Retrieves the list of persistent disks contained within the specified region.", + // "description": "Retrieves the list of persistent disks contained within the specified region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionDisks.list", // "parameterOrder": [ @@ -82342,6 +92002,193 @@ func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error } } +// method id "compute.regionDisks.removeResourcePolicies": + +type RegionDisksRemoveResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveResourcePolicies: Removes resource policies from a regional +// disk. (== suppress_warning http-rest-shadowed ==) +func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { + c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.disk = disk + c.regiondisksremoveresourcepoliciesrequest = regiondisksremoveresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *RegionDisksRemoveResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksRemoveResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDisksRemoveResourcePoliciesCall) Context(ctx context.Context) *RegionDisksRemoveResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksremoveresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/removeResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.removeResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes resource policies from a regional disk. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionDisks.removeResourcePolicies", + // "parameterOrder": [ + // "project", + // "region", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + // "request": { + // "$ref": "RegionDisksRemoveResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionDisks.resize": type RegionDisksResizeCall struct { @@ -82355,7 +92202,8 @@ type RegionDisksResizeCall struct { header_ http.Header } -// Resize: Resizes the specified regional persistent disk. +// Resize: Resizes the specified regional persistent disk. 
(== +// suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -82411,6 +92259,7 @@ func (c *RegionDisksResizeCall) Header() http.Header { func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82476,7 +92325,7 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Resizes the specified regional persistent disk.", + // "description": "Resizes the specified regional persistent disk. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionDisks.resize", // "parameterOrder": [ @@ -82540,7 +92389,8 @@ type RegionDisksSetLabelsCall struct { header_ http.Header } -// SetLabels: Sets the labels on the target regional disk. +// SetLabels: Sets the labels on the target regional disk. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -82596,6 +92446,7 @@ func (c *RegionDisksSetLabelsCall) Header() http.Header { func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82661,7 +92512,7 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on the target regional disk.", + // "description": "Sets the labels on the target regional disk. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionDisks.setLabels", // "parameterOrder": [ @@ -82726,7 +92577,7 @@ type RegionDisksTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. (== suppress_warning http-rest-shadowed ==) func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -82763,6 +92614,7 @@ func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82828,7 +92680,7 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionDisks.testIamPermissions", // "parameterOrder": [ @@ -82875,6 +92727,1170 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } +// method id "compute.regionHealthChecks.delete": + +type RegionHealthChecksDeleteCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified HealthCheck resource. (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { + c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified HealthCheck resource. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthChecks.delete", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.get": + +type RegionHealthChecksGetCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { + c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HealthCheck{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionHealthChecks.get", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "response": { + // "$ref": "HealthCheck" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionHealthChecks.insert": + +type RegionHealthChecksInsertCall struct { + s *Service + project string + region string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) +func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { + c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
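// ---------------------------------------------------------------------------
// Illustrative sketch of the regionHealthChecks.insert call documented above
// (not part of the generated client or of this vendoring diff). The health
// check field values, its name, and the project/region are placeholders, and
// Application Default Credentials are assumed.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	hc := &compute.HealthCheck{
		Name:               "example-regional-tcp-hc",
		Type:               "TCP",
		TcpHealthCheck:     &compute.TCPHealthCheck{Port: 443},
		CheckIntervalSec:   10,
		TimeoutSec:         5,
		HealthyThreshold:   2,
		UnhealthyThreshold: 3,
	}

	op, err := svc.RegionHealthChecks.
		Insert("my-project", "us-central1", hc).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation %s is %s", op.Name, op.Status)
}
// ---------------------------------------------------------------------------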
+func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionHealthChecks.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.list": + +type RegionHealthChecksListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of HealthCheck resources available to the +// specified project. (== suppress_warning http-rest-shadowed ==) +func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { + c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
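// ---------------------------------------------------------------------------
// Illustrative sketch of conditional reads with IfNoneMatch, as described
// above (not part of the generated client or of this vendoring diff). It
// assumes the server returns an ETag header for the resource and that the
// caller saved it from an earlier Get; the project, region and health-check
// name are placeholders, with Application Default Credentials assumed.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// First read: keep the ETag from the response headers.
	hc, err := svc.RegionHealthChecks.
		Get("my-project", "us-central1", "example-regional-tcp-hc").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := hc.ServerResponse.Header.Get("Etag")

	// Later read: only transfer the resource if it changed since then.
	_, err = svc.RegionHealthChecks.
		Get("my-project", "us-central1", "example-regional-tcp-hc").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("health check unchanged; the cached copy is still current")
	} else if err != nil {
		log.Fatal(err)
	}
}
// ---------------------------------------------------------------------------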
+func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HealthCheck resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionHealthChecks.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks", + // "response": { + // "$ref": "HealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
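// ---------------------------------------------------------------------------
// Illustrative sketch of paging through regionHealthChecks.list results with
// the Pages helper documented above (not part of the generated client or of
// this vendoring diff). The filter expression, page size, project and region
// are placeholders; Application Default Credentials are assumed.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.RegionHealthChecks.
		List("my-project", "us-central1").
		Filter("name != legacy-hc"). // example filter expression
		MaxResults(50)               // smaller pages than the 500 default

	// Pages follows nextPageToken internally and invokes f once per page.
	err = call.Pages(ctx, func(page *compute.HealthCheckList) error {
		for _, hc := range page.Items {
			fmt.Println(hc.Name, hc.Type)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
// ---------------------------------------------------------------------------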
+func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionHealthChecks.patch": + +type RegionHealthChecksPatchCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +// (== suppress_warning http-rest-shadowed ==) +func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { + c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
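// ---------------------------------------------------------------------------
// Illustrative sketch of regionHealthChecks.patch (not part of the generated
// client or of this vendoring diff). Because the method uses JSON merge-patch
// semantics, as its description above notes, only the fields set in the
// request body should be changed on the server; the project, region, name and
// value here are placeholders, with Application Default Credentials assumed.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Only CheckIntervalSec is sent; the other fields of the health check are
	// left as they are on the server.
	patch := &compute.HealthCheck{CheckIntervalSec: 30}

	op, err := svc.RegionHealthChecks.
		Patch("my-project", "us-central1", "example-regional-tcp-hc", patch).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation %s is %s", op.Name, op.Status)
}
// ---------------------------------------------------------------------------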
+func (c *RegionHealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PATCH", + // "id": "compute.regionHealthChecks.patch", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.update": + +type RegionHealthChecksUpdateCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { + c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PUT", + // "id": "compute.regionHealthChecks.update", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionInstanceGroupManagers.abandonInstances": type RegionInstanceGroupManagersAbandonInstancesCall struct { @@ -82904,7 +93920,7 @@ type RegionInstanceGroupManagersAbandonInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. +// request. (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -82960,6 +93976,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83025,7 +94042,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Flags the specified instances to be immediately removed from the managed instance group. 
Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.abandonInstances", // "parameterOrder": [ @@ -83087,7 +94104,7 @@ type RegionInstanceGroupManagersDeleteCall struct { } // Delete: Deletes the specified managed instance group and all of the -// instances in that group. +// instances in that group. (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83142,6 +94159,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83202,7 +94220,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "description": "Deletes the specified managed instance group and all of the instances in that group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.regionInstanceGroupManagers.delete", // "parameterOrder": [ @@ -83277,7 +94295,7 @@ type RegionInstanceGroupManagersDeleteInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. +// request. 
(== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83333,6 +94351,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83398,7 +94417,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ @@ -83461,7 +94480,7 @@ type RegionInstanceGroupManagersGetCall struct { } // Get: Returns all of the details about the specified managed instance -// group. +// group. 
(== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83507,6 +94526,7 @@ func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83570,7 +94590,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group.", + // "description": "Returns all of the details about the specified managed instance group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ @@ -83633,6 +94653,7 @@ type RegionInstanceGroupManagersInsertCall struct { // listmanagedinstances method. // // A regional managed instance group can contain up to 2000 instances. +// (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83687,6 +94708,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83751,7 +94773,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ @@ -83806,7 +94828,8 @@ type RegionInstanceGroupManagersListCall struct { } // List: Retrieves the list of managed instance groups that are -// contained within the specified region. +// contained within the specified region. 
(== suppress_warning +// http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -83914,6 +94937,7 @@ func (c *RegionInstanceGroupManagersListCall) Header() http.Header { func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83976,7 +95000,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ @@ -84070,7 +95094,7 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // ListManagedInstances: Lists the instances in the managed instance // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its -// instances. +// instances. (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84169,6 +95193,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Head func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84231,7 +95256,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ @@ -84315,7 +95340,7 @@ type RegionInstanceGroupManagersPatchCall struct { // process of being patched. You must separately verify the status of // the individual instances with the listmanagedinstances method. This // method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. +// and processing rules. 
(== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84371,6 +95396,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84436,7 +95462,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.regionInstanceGroupManagers.patch", // "parameterOrder": [ @@ -84512,7 +95538,7 @@ type RegionInstanceGroupManagersRecreateInstancesCall struct { // deleted. // // You can specify a maximum of 1000 instances with this method per -// request. +// request. (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84568,6 +95594,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84633,7 +95660,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ @@ -84707,6 +95734,7 @@ type RegionInstanceGroupManagersResizeCall struct { // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining // duration has elapsed before the VM instance is removed or deleted. +// (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84762,6 +95790,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84822,7 +95851,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ @@ -84892,7 +95921,8 @@ type RegionInstanceGroupManagersSetInstanceTemplateCall struct { // SetInstanceTemplate: Sets the instance template to use when creating // new instances or recreating instances in this group. Existing -// instances are not affected. +// instances are not affected. (== suppress_warning http-rest-shadowed +// ==) func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84948,6 +95978,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Heade func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85013,7 +96044,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ @@ -85077,7 +96108,7 @@ type RegionInstanceGroupManagersSetTargetPoolsCall struct { // SetTargetPools: Modifies the target pools to which all new instances // in this group are assigned. Existing instances in the group are not -// affected. +// affected. (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85133,6 +96164,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85198,7 +96230,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", + // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ @@ -85260,7 +96292,8 @@ type RegionInstanceGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified instance group resource. +// Get: Returns the specified instance group resource. (== +// suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85306,6 +96339,7 @@ func (c *RegionInstanceGroupsGetCall) Header() http.Header { func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85369,7 +96403,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Returns the specified instance group resource.", + // "description": "Returns the specified instance group resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ @@ -85424,7 +96458,7 @@ type RegionInstanceGroupsListCall struct { } // List: Retrieves the list of instance group resources contained within -// the specified region. +// the specified region. (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85532,6 +96566,7 @@ func (c *RegionInstanceGroupsListCall) Header() http.Header { func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85594,7 +96629,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "description": "Retrieves the list of instance group resources contained within the specified region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ @@ -85689,7 +96724,8 @@ type RegionInstanceGroupsListInstancesCall struct { // ListInstances: Lists the instances in the specified instance group // and displays information about the named ports. Depending on the // specified options, this method can list all instances or only the -// instances that are running. +// instances that are running. 
(== suppress_warning http-rest-shadowed +// ==) func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85789,6 +96825,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85855,7 +96892,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ @@ -85958,7 +96995,7 @@ type RegionInstanceGroupsSetNamedPortsCall struct { } // SetNamedPorts: Sets the named ports for the specified regional -// instance group. +// instance group. (== suppress_warning http-rest-shadowed ==) func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86014,6 +97051,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86079,7 +97117,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the named ports for the specified regional instance group.", + // "description": "Sets the named ports for the specified regional instance group. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ @@ -86141,6 +97179,7 @@ type RegionOperationsDeleteCall struct { } // Delete: Deletes the specified region-specific Operations resource. 
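Every regional mutation above (resize, patch, setInstanceTemplate, setTargetPools, setNamedPorts, and so on) returns an Operation that completes asynchronously; the regionOperations hunks that follow are what a caller polls to find out when it is done. A hedged polling sketch, assuming the service value, project, region and operation name come from the caller and picking an arbitrary two-second poll interval:

package computeexamples

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// waitForRegionOperation is a hypothetical helper, not part of the vendored
// package. It polls compute.regionOperations.get until the named operation
// reports DONE or the context is cancelled, then surfaces the first
// operation error, if any.
func waitForRegionOperation(ctx context.Context, svc *compute.Service, project, region, opName string) error {
	for {
		op, err := svc.RegionOperations.Get(project, region, opName).Context(ctx).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			if op.Error != nil && len(op.Error.Errors) > 0 {
				return fmt.Errorf("operation %s failed: %s", opName, op.Error.Errors[0].Message)
			}
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second): // arbitrary poll interval
		}
	}
}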
+// (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86177,6 +97216,7 @@ func (c *RegionOperationsDeleteCall) Header() http.Header { func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86212,7 +97252,7 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Deletes the specified region-specific Operations resource.", + // "description": "Deletes the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.regionOperations.delete", // "parameterOrder": [ @@ -86265,7 +97305,8 @@ type RegionOperationsGetCall struct { header_ http.Header } -// Get: Retrieves the specified region-specific Operations resource. +// Get: Retrieves the specified region-specific Operations resource. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86312,6 +97353,7 @@ func (c *RegionOperationsGetCall) Header() http.Header { func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86375,7 +97417,7 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified region-specific Operations resource.", + // "description": "Retrieves the specified region-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionOperations.get", // "parameterOrder": [ @@ -86432,7 +97474,7 @@ type RegionOperationsListCall struct { } // List: Retrieves a list of Operation resources contained within the -// specified region. +// specified region. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86541,6 +97583,7 @@ func (c *RegionOperationsListCall) Header() http.Header { func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86603,7 +97646,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "description": "Retrieves a list of Operation resources contained within the specified region. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regionOperations.list", // "parameterOrder": [ @@ -86683,6 +97726,4211 @@ func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } +// method id "compute.regionSslCertificates.delete": + +type RegionSslCertificatesDeleteCall struct { + s *Service + project string + region string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified SslCertificate resource in the region. +// (== suppress_warning http-rest-shadowed ==) +func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { + c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.sslCertificate = sslCertificate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSslCertificatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "sslCertificate": c.sslCertificate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslCertificates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified SslCertificate resource in the region. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.regionSslCertificates.delete", + // "parameterOrder": [ + // "project", + // "region", + // "sslCertificate" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionSslCertificates.get": + +type RegionSslCertificatesGetCall struct { + s *Service + project string + region string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified SslCertificate resource in the specified +// region. Get a list of available SSL certificates by making a list() +// request. (== suppress_warning http-rest-shadowed ==) +func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { + c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.sslCertificate = sslCertificate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSslCertificatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "sslCertificate": c.sslCertificate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertificate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionSslCertificates.get", + // "parameterOrder": [ + // "project", + // "region", + // "sslCertificate" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "response": { + // "$ref": "SslCertificate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionSslCertificates.insert": + +type RegionSslCertificatesInsertCall struct { + s *Service + project string + region string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a SslCertificate resource in the specified project +// and region using the data included in the request (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionSslCertificatesService) Insert(project string, region string, 
sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { + c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.sslcertificate = sslcertificate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSslCertificatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslCertificates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
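A short, hypothetical sketch of driving the new compute.regionSslCertificates.insert builder shown above. It assumes the caller already holds PEM-encoded certificate and key material and a constructed *compute.Service; the resource name is a placeholder, and the returned Operation would typically be polled via compute.regionOperations.get.

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// insertRegionSslCertificate is a hypothetical helper, not part of the
// vendored package. certPEM and keyPEM are the PEM-encoded certificate chain
// and private key for the new regional certificate.
func insertRegionSslCertificate(ctx context.Context, svc *compute.Service, project, region, requestID, certPEM, keyPEM string) (*compute.Operation, error) {
	cert := &compute.SslCertificate{
		Name:        "example-regional-cert", // placeholder resource name
		Certificate: certPEM,
		PrivateKey:  keyPEM,
	}
	return svc.RegionSslCertificates.Insert(project, region, cert).
		RequestId(requestID). // reuse the same UUID on retries
		Context(ctx).
		Do()
}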
+func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionSslCertificates.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/sslCertificates", + // "request": { + // "$ref": "SslCertificate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionSslCertificates.list": + +type RegionSslCertificatesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of SslCertificate resources available to the +// specified project in the specified region. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { + c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSslCertificatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SslCertificateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertificateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionSslCertificates.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/sslCertificates", + // "response": { + // "$ref": "SslCertificateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionTargetHttpProxies.delete": + +type RegionTargetHttpProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified TargetHttpProxy resource. (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { + c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpProxy = targetHttpProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetHttpProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetHttpProxies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpProxies.get": + +type RegionTargetHttpProxiesGetCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified TargetHttpProxy resource in the specified +// region. Gets a list of available target HTTP proxies by making a +// list() request. (== suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { + c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpProxy = targetHttpProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpProxiesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionTargetHttpProxies.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + // "response": { + // "$ref": "TargetHttpProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionTargetHttpProxies.insert": + +type RegionTargetHttpProxiesInsertCall struct { + s *Service + project string + region string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a TargetHttpProxy resource in the specified project +// and region using the data included in the request. (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { + c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targethttpproxy = targethttpproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RegionTargetHttpProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpProxiesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpProxies.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpProxies", + // "request": { + // "$ref": "TargetHttpProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpProxies.list": + +type RegionTargetHttpProxiesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project in the specified region. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { + c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpProxiesListCall) Context(ctx context.Context) *RegionTargetHttpProxiesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpProxies.list" call. +// Exactly one of *TargetHttpProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionTargetHttpProxies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpProxies", + // "response": { + // "$ref": "TargetHttpProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionTargetHttpProxies.setUrlMap": + +type RegionTargetHttpProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetUrlMap: Changes the URL map for TargetHttpProxy. (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { + c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpProxy. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.delete": + +type RegionTargetHttpsProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified TargetHttpsProxy resource. 
(== +// suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { + c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpsProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpsProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetHttpsProxy resource. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetHttpsProxies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.get": + +type RegionTargetHttpsProxiesGetCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified TargetHttpsProxy resource in the specified +// region. Gets a list of available target HTTP proxies by making a +// list() request. 
(== suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { + c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionTargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpsProxiesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.get" call. +// Exactly one of *TargetHttpsProxy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpsProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionTargetHttpsProxies.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "TargetHttpsProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.insert": + +type RegionTargetHttpsProxiesInsertCall struct { + s *Service + project string + region string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a TargetHttpsProxy resource in the specified project +// and region using the data included in the request. (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { + c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targethttpsproxy = targethttpsproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpsProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpsProxiesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies", + // "request": { + // "$ref": "TargetHttpsProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.list": + +type RegionTargetHttpsProxiesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of TargetHttpsProxy resources available to +// the specified project in the specified region. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { + c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
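// Editor's note: illustrative sketch, not part of the generated client. It
// shows the conditional-read pattern behind IfNoneMatch and
// googleapi.IsNotModified described above. Assumes a *compute.Service built
// as in the earlier sketch; whether an ETag comes back is up to the server,
// and the project/region/proxy names are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func conditionalGet(ctx context.Context, svc *compute.Service) error {
	// First read: keep the ETag from the response headers.
	proxy, err := svc.RegionTargetHttpsProxies.
		Get("my-project", "us-central1", "my-proxy").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	etag := proxy.Header.Get("Etag")

	// Second read: the body is only transferred if the resource changed.
	_, err = svc.RegionTargetHttpsProxies.
		Get("my-project", "us-central1", "my-proxy").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Println("proxy unchanged since last read")
		return nil
	}
	return err
}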
+func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesListCall) Context(ctx context.Context) *RegionTargetHttpsProxiesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.list" call. +// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpsProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionTargetHttpsProxies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies", + // "response": { + // "$ref": "TargetHttpsProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
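// Editor's note: illustrative sketch, not part of the generated client. It
// exercises Pages together with the filter and maxResults parameters
// documented above; Pages follows nextPageToken internally. Assumes a
// *compute.Service built as in the earlier sketch; project, region, and the
// filter value are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func listProxies(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionTargetHttpsProxies.
		List("my-project", "us-central1").
		Filter("name != legacy-proxy").
		MaxResults(100)

	// The callback runs once per page; returning a non-nil error stops paging.
	return call.Pages(ctx, func(page *compute.TargetHttpsProxyList) error {
		for _, p := range page.Items {
			log.Printf("proxy: %s", p.Name)
		}
		return nil
	})
}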
+func (c *RegionTargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionTargetHttpsProxies.setSslCertificates": + +type RegionTargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + region string + targetHttpsProxy string + regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +// (== suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.regiontargethttpsproxiessetsslcertificatesrequest = regiontargethttpsproxiessetsslcertificatesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
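// Editor's note: illustrative sketch, not part of the generated client. It
// shows a setSslCertificates call as documented above; the request body's
// SslCertificates field is assumed to take full SslCertificate resource URLs.
// All project/region/resource names are placeholders, and the service is
// built as in the earlier sketch.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func replaceCertificates(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionTargetHttpsProxiesSetSslCertificatesRequest{
		SslCertificates: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/sslCertificates/my-cert",
		},
	}
	op, err := svc.RegionTargetHttpsProxies.
		SetSslCertificates("my-project", "us-central1", "my-proxy", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setSslCertificates operation: %s", op.Name)
	return nil
}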
+func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiontargethttpsproxiessetsslcertificatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.setSslCertificates" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setSslCertificates", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "request": { + // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.setUrlMap": + +type RegionTargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetUrlMap: Changes the URL map for TargetHttpsProxy. (== +// suppress_warning http-rest-shadowed ==) +func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { + c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpsProxy. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionUrlMaps.delete": + +type RegionUrlMapsDeleteCall struct { + s *Service + project string + region string + urlMap string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified UrlMap resource. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionUrlMapsService) Delete(project string, region string, urlMap string) *RegionUrlMapsDeleteCall { + c := &RegionUrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.urlMap = urlMap + return c +} + +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsDeleteCall) RequestId(requestId string) *RegionUrlMapsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsDeleteCall) Fields(s ...googleapi.Field) *RegionUrlMapsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
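// Editor's note: illustrative sketch, not part of the generated client. It
// shows the setUrlMap call documented above: UrlMapReference carries the URL
// of the map the proxy should route with. Names are placeholders and the
// service is built as in the earlier sketch.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func switchUrlMap(ctx context.Context, svc *compute.Service) error {
	ref := &compute.UrlMapReference{
		UrlMap: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/urlMaps/my-url-map",
	}
	op, err := svc.RegionTargetHttpsProxies.
		SetUrlMap("my-project", "us-central1", "my-proxy", ref).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setUrlMap operation: %s", op.Name)
	return nil
}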
+func (c *RegionUrlMapsDeleteCall) Context(ctx context.Context) *RegionUrlMapsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionUrlMapsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified UrlMap resource. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.regionUrlMaps.delete", + // "parameterOrder": [ + // "project", + // "region", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "location": "query", + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionUrlMaps.get": + +type RegionUrlMapsGetCall struct { + s *Service + project string + region string + urlMap string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified UrlMap resource. Gets a list of available +// URL maps by making a list() request. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionUrlMapsService) Get(project string, region string, urlMap string) *RegionUrlMapsGetCall { + c := &RegionUrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.urlMap = urlMap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsGetCall) Fields(s ...googleapi.Field) *RegionUrlMapsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionUrlMapsGetCall) IfNoneMatch(entityTag string) *RegionUrlMapsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionUrlMapsGetCall) Context(ctx context.Context) *RegionUrlMapsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionUrlMapsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.get" call. +// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *UrlMap.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMap{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionUrlMaps.get", + // "parameterOrder": [ + // "project", + // "region", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "response": { + // "$ref": "UrlMap" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionUrlMaps.insert": + +type RegionUrlMapsInsertCall struct { + s *Service + project string + region string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a UrlMap resource in the specified project using the +// data included in the request. (== suppress_warning http-rest-shadowed +// ==) +func (r *RegionUrlMapsService) Insert(project string, region string, urlmap *UrlMap) *RegionUrlMapsInsertCall { + c := &RegionUrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.urlmap = urlmap + return c +} + +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsInsertCall) RequestId(requestId string) *RegionUrlMapsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsInsertCall) Fields(s ...googleapi.Field) *RegionUrlMapsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionUrlMapsInsertCall) Context(ctx context.Context) *RegionUrlMapsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
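// Editor's note: illustrative sketch, not part of the generated client. It
// creates a minimal regional UrlMap with Insert and reads it back with a
// partial response via Fields, as described above. The backend-service URL
// and other names are placeholders; the service is built as in the earlier
// sketch.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func createAndReadUrlMap(ctx context.Context, svc *compute.Service) error {
	urlMap := &compute.UrlMap{
		Name:           "my-url-map",
		DefaultService: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/backendServices/my-backend",
	}
	op, err := svc.RegionUrlMaps.Insert("my-project", "us-central1", urlMap).Context(ctx).Do()
	if err != nil {
		return err
	}
	// In real code, wait for this operation to reach DONE before reading back.
	log.Printf("insert operation: %s", op.Name)

	// Fields limits the response to the listed attributes, keeping the payload small.
	got, err := svc.RegionUrlMaps.
		Get("my-project", "us-central1", "my-url-map").
		Fields("name", "defaultService").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("url map %s -> %s", got.Name, got.DefaultService)
	return nil
}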
+func (c *RegionUrlMapsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a UrlMap resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionUrlMaps.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionUrlMaps.list": + +type RegionUrlMapsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of UrlMap resources available to the +// specified project in the specified region. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMapsListCall { + c := &RegionUrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *RegionUrlMapsListCall) MaxResults(maxResults int64) *RegionUrlMapsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionUrlMapsListCall) OrderBy(orderBy string) *RegionUrlMapsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsListCall) Fields(s ...googleapi.Field) *RegionUrlMapsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionUrlMapsListCall) IfNoneMatch(entityTag string) *RegionUrlMapsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionUrlMapsListCall) Context(ctx context.Context) *RegionUrlMapsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionUrlMapsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.list" call. +// Exactly one of *UrlMapList or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *UrlMapList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMapList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.regionUrlMaps.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps", + // "response": { + // "$ref": "UrlMapList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionUrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionUrlMaps.patch": + +type RegionUrlMapsPatchCall struct { + s *Service + project string + region string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified UrlMap resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. (== suppress_warning +// http-rest-shadowed ==) +func (r *RegionUrlMapsService) Patch(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsPatchCall { + c := &RegionUrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.urlMap = urlMap + c.urlmap = urlmap + return c +} + +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsPatchCall) RequestId(requestId string) *RegionUrlMapsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsPatchCall) Fields(s ...googleapi.Field) *RegionUrlMapsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
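+//
+// Illustrative sketch (assuming a configured *Service value, here called
+// svc, exposing the usual RegionUrlMaps accessor): a patch request can be
+// bounded by a timeout through Context.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	patchBody := &UrlMap{ /* only the fields being changed */ }
+//	op, err := svc.RegionUrlMaps.Patch("my-project", "us-central1", "my-url-map", patchBody).Context(ctx).Do()
+//	if err != nil {
+//		// handle the error; on success, op describes the resulting Operation
+//	}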
+func (c *RegionUrlMapsPatchCall) Context(ctx context.Context) *RegionUrlMapsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionUrlMapsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PATCH", + // "id": "compute.regionUrlMaps.patch", + // "parameterOrder": [ + // "project", + // "region", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "location": "query", + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionUrlMaps.update": + +type RegionUrlMapsUpdateCall struct { + s *Service + project string + region string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified UrlMap resource with the data included +// in the request. (== suppress_warning http-rest-shadowed ==) +func (r *RegionUrlMapsService) Update(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsUpdateCall { + c := &RegionUrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.urlMap = urlMap + c.urlmap = urlmap + return c +} + +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsUpdateCall) RequestId(requestId string) *RegionUrlMapsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsUpdateCall) Fields(s ...googleapi.Field) *RegionUrlMapsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionUrlMapsUpdateCall) Context(ctx context.Context) *RegionUrlMapsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
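+//
+// Illustrative sketch (assuming the same configured *Service named svc; the
+// header name below is an arbitrary example, not one the API requires):
+//
+//	call := svc.RegionUrlMaps.Update("my-project", "us-central1", "my-url-map", updatedMap)
+//	call.Header().Set("X-Example-Trace", "rollout-1234")
+//	op, err := call.Context(ctx).Do()
+//	if err != nil {
+//		// handle the error; on success, op describes the resulting Operation
+//	}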
+func (c *RegionUrlMapsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified UrlMap resource with the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PUT", + // "id": "compute.regionUrlMaps.update", + // "parameterOrder": [ + // "project", + // "region", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "location": "query", + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionUrlMaps.validate": + +type RegionUrlMapsValidateCall struct { + s *Service + project string + region string + urlMap string + regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Validate: Runs static validation for the UrlMap. In particular, the +// tests of the provided UrlMap will be run. Calling this method does +// NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==) +func (r *RegionUrlMapsService) Validate(project string, region string, urlMap string, regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest) *RegionUrlMapsValidateCall { + c := &RegionUrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.urlMap = urlMap + c.regionurlmapsvalidaterequest = regionurlmapsvalidaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsValidateCall) Fields(s ...googleapi.Field) *RegionUrlMapsValidateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionUrlMapsValidateCall) Context(ctx context.Context) *RegionUrlMapsValidateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionUrlMapsValidateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionurlmapsvalidaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/validate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.validate" call. +// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMapsValidateResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.regionUrlMaps.validate", + // "parameterOrder": [ + // "project", + // "region", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to be validated as.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}/validate", + // "request": { + // "$ref": "RegionUrlMapsValidateRequest" + // }, + // "response": { + // "$ref": "UrlMapsValidateResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regions.get": type RegionsGetCall struct { @@ -86696,7 +101944,8 @@ type RegionsGetCall struct { } // Get: Returns the specified Region resource. Gets a list of available -// regions by making a list() request. +// regions by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get func (r *RegionsService) Get(project string, region string) *RegionsGetCall { c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86742,6 +101991,7 @@ func (c *RegionsGetCall) Header() http.Header { func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86804,7 +102054,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", + // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regions.get", // "parameterOrder": [ @@ -86852,7 +102102,7 @@ type RegionsListCall struct { } // List: Retrieves the list of region resources available to the -// specified project. +// specified project. 
(== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list func (r *RegionsService) List(project string) *RegionsListCall { c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -86960,6 +102210,7 @@ func (c *RegionsListCall) Header() http.Header { func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87021,7 +102272,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } return ret, nil // { - // "description": "Retrieves the list of region resources available to the specified project.", + // "description": "Retrieves the list of region resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.regions.list", // "parameterOrder": [ @@ -87093,6 +102344,3267 @@ func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) } } +// method id "compute.reservations.aggregatedList": + +type ReservationsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of reservations. (== +// suppress_warning http-rest-shadowed ==) +func (r *ReservationsService) AggregatedList(project string) *ReservationsAggregatedListCall { + c := &ReservationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ReservationsAggregatedListCall) Filter(filter string) *ReservationsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *ReservationsAggregatedListCall) MaxResults(maxResults int64) *ReservationsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ReservationsAggregatedListCall) OrderBy(orderBy string) *ReservationsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *ReservationsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsAggregatedListCall) Fields(s ...googleapi.Field) *ReservationsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsAggregatedListCall) IfNoneMatch(entityTag string) *ReservationsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ReservationsAggregatedListCall) Context(ctx context.Context) *ReservationsAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ReservationsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/reservations") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.aggregatedList" call. +// Exactly one of *ReservationAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ReservationAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*ReservationAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ReservationAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of reservations. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.reservations.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/reservations", + // "response": { + // "$ref": "ReservationAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ReservationsAggregatedListCall) Pages(ctx context.Context, f func(*ReservationAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.reservations.delete": + +type ReservationsDeleteCall struct { + s *Service + project string + zone string + reservation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified reservation. (== suppress_warning +// http-rest-shadowed ==) +func (r *ReservationsService) Delete(project string, zone string, reservation string) *ReservationsDeleteCall { + c := &ReservationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.reservation = reservation + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ReservationsDeleteCall) RequestId(requestId string) *ReservationsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsDeleteCall) Fields(s ...googleapi.Field) *ReservationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ReservationsDeleteCall) Context(ctx context.Context) *ReservationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ReservationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified reservation. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.reservations.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "reservation" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "reservation": { + // "description": "Name of the reservation to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations/{reservation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.reservations.get": + +type ReservationsGetCall struct { + s *Service + project string + zone string + reservation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves information about the specified reservation. (== +// suppress_warning http-rest-shadowed ==) +func (r *ReservationsService) Get(project string, zone string, reservation string) *ReservationsGetCall { + c := &ReservationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.reservation = reservation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsGetCall) Fields(s ...googleapi.Field) *ReservationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsGetCall) IfNoneMatch(entityTag string) *ReservationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
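+//
+// Illustrative sketch of conditional retrieval (assuming a configured
+// *Service named svc and prevETag holding the ETag from an earlier
+// response):
+//
+//	res, err := svc.Reservations.Get("my-project", "us-central1-a", "my-reservation").
+//		IfNoneMatch(prevETag).
+//		Context(ctx).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the previously fetched copy is still current
+//	} else if err != nil {
+//		// handle other errors
+//	}
+//	_ = res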
+func (c *ReservationsGetCall) Context(ctx context.Context) *ReservationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ReservationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.get" call. +// Exactly one of *Reservation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Reservation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ReservationsGetCall) Do(opts ...googleapi.CallOption) (*Reservation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Reservation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves information about the specified reservation. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.reservations.get", + // "parameterOrder": [ + // "project", + // "zone", + // "reservation" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "reservation": { + // "description": "Name of the reservation to retrieve.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations/{reservation}", + // "response": { + // "$ref": "Reservation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.reservations.getIamPolicy": + +type ReservationsGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) +func (r *ReservationsService) GetIamPolicy(project string, zone string, resource string) *ReservationsGetIamPolicyCall { + c := &ReservationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsGetIamPolicyCall) Fields(s ...googleapi.Field) *ReservationsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsGetIamPolicyCall) IfNoneMatch(entityTag string) *ReservationsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ReservationsGetIamPolicyCall) Context(ctx context.Context) *ReservationsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ReservationsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.reservations.getIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.reservations.insert": + +type ReservationsInsertCall struct { + s *Service + project string + zone string + reservation *Reservation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new reservation. For more information, read +// Reserving zonal resources. (== suppress_warning http-rest-shadowed +// ==) +func (r *ReservationsService) Insert(project string, zone string, reservation *Reservation) *ReservationsInsertCall { + c := &ReservationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.reservation = reservation + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ReservationsInsertCall) RequestId(requestId string) *ReservationsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsInsertCall) Fields(s ...googleapi.Field) *ReservationsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
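+//
+// Illustrative sketch of an idempotent insert (assuming a configured
+// *Service named svc and requestID holding a caller-generated UUID string):
+//
+//	op, err := svc.Reservations.Insert("my-project", "us-central1-a", &Reservation{
+//		// populate the desired reservation fields here
+//	}).RequestId(requestID).Context(ctx).Do()
+//	if err != nil {
+//		// a retry with the same requestID will not create a duplicate reservation
+//	}
+//	_ = op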
+func (c *ReservationsInsertCall) Context(ctx context.Context) *ReservationsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ReservationsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reservation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new reservation. For more information, read Reserving zonal resources. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.reservations.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations", + // "request": { + // "$ref": "Reservation" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.reservations.list": + +type ReservationsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: A list of all the reservations that have been configured for +// the specified project in specified zone. (== suppress_warning +// http-rest-shadowed ==) +func (r *ReservationsService) List(project string, zone string) *ReservationsListCall { + c := &ReservationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ReservationsListCall) Filter(filter string) *ReservationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *ReservationsListCall) MaxResults(maxResults int64) *ReservationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ReservationsListCall) OrderBy(orderBy string) *ReservationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsListCall) Fields(s ...googleapi.Field) *ReservationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsListCall) IfNoneMatch(entityTag string) *ReservationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ReservationsListCall) Context(ctx context.Context) *ReservationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ReservationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.list" call. +// Exactly one of *ReservationList or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *ReservationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ReservationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "A list of all the reservations that have been configured for the specified project in specified zone. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.reservations.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations", + // "response": { + // "$ref": "ReservationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ReservationsListCall) Pages(ctx context.Context, f func(*ReservationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.reservations.resize": + +type ReservationsResizeCall struct { + s *Service + project string + zone string + reservation string + reservationsresizerequest *ReservationsResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resize: Resizes the reservation (applicable to standalone +// reservations only). For more information, read Modifying +// reservations. (== suppress_warning http-rest-shadowed ==) +func (r *ReservationsService) Resize(project string, zone string, reservation string, reservationsresizerequest *ReservationsResizeRequest) *ReservationsResizeCall { + c := &ReservationsResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.reservation = reservation + c.reservationsresizerequest = reservationsresizerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
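A sketch of how a caller might combine the Filter, MaxResults, and Pages helpers documented above to walk every page of a zonal reservation list; the Items and Name fields are assumed to exist as in other generated list/resource types, and the filter expression is purely hypothetical:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listReservations walks every page of reservations in a zone, letting the
// generated Pages helper handle nextPageToken on our behalf.
func listReservations(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.Reservations.List(project, zone).
		Filter(`status = "READY"`). // hypothetical filter expression
		MaxResults(100)             // per-page cap; the API default is 500
	return call.Pages(ctx, func(page *compute.ReservationList) error {
		for _, r := range page.Items { // Items/Name field names are assumptions
			fmt.Println(r.Name)
		}
		return nil
	})
}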
+func (c *ReservationsResizeCall) RequestId(requestId string) *ReservationsResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsResizeCall) Fields(s ...googleapi.Field) *ReservationsResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ReservationsResizeCall) Context(ctx context.Context) *ReservationsResizeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ReservationsResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reservationsresizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}/resize") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ReservationsResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the reservation (applicable to standalone reservations only). For more information, read Modifying reservations. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.reservations.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "reservation" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "reservation": { + // "description": "Name of the reservation to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations/{reservation}/resize", + // "request": { + // "$ref": "ReservationsResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.reservations.setIamPolicy": + +type ReservationsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) +func (r *ReservationsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *ReservationsSetIamPolicyCall { + c := &ReservationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsSetIamPolicyCall) Fields(s ...googleapi.Field) *ReservationsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
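A short sketch of the SetIamPolicy call introduced above, which replaces any existing policy on the reservation; the Policy field name on ZoneSetPolicyRequest is an assumption, and in practice the policy would usually be read via GetIamPolicy, modified, and written back:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// replaceReservationPolicy overwrites the IAM policy on a reservation with
// the policy supplied by the caller.
func replaceReservationPolicy(ctx context.Context, svc *compute.Service, project, zone, resource string, policy *compute.Policy) (*compute.Policy, error) {
	req := &compute.ZoneSetPolicyRequest{Policy: policy} // Policy field name is assumed
	return svc.Reservations.SetIamPolicy(project, zone, resource, req).
		Context(ctx).
		Do()
}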
+func (c *ReservationsSetIamPolicyCall) Context(ctx context.Context) *ReservationsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ReservationsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ReservationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.reservations.setIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.reservations.testIamPermissions": + +type ReservationsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. (== suppress_warning http-rest-shadowed ==) +func (r *ReservationsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *ReservationsTestIamPermissionsCall { + c := &ReservationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ReservationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ReservationsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ReservationsTestIamPermissionsCall) Context(ctx context.Context) *ReservationsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
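The Do documentation above notes that non-2xx responses surface as a *googleapi.Error whose Header and Code carry the server response details. A sketch of unwrapping that error around the TestIamPermissions call, assuming the Permissions field on TestPermissionsRequest:

package example

import (
	"context"
	"errors"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// checkReservationAccess asks which of the given permissions the caller holds
// on a reservation and reports the HTTP status when the call fails.
func checkReservationAccess(ctx context.Context, svc *compute.Service, project, zone, resource string, perms []string) (*compute.TestPermissionsResponse, error) {
	req := &compute.TestPermissionsRequest{Permissions: perms} // field name assumed
	resp, err := svc.Reservations.TestIamPermissions(project, zone, resource, req).Context(ctx).Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			return nil, fmt.Errorf("testIamPermissions failed with HTTP %d: %w", gerr.Code, err)
		}
		return nil, err
	}
	return resp, nil
}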
+func (c *ReservationsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.reservations.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.reservations.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/reservations/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.resourcePolicies.aggregatedList": + +type ResourcePoliciesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of resource policies. +// (== suppress_warning http-rest-shadowed ==) +func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePoliciesAggregatedListCall { + c := &ResourcePoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ResourcePoliciesAggregatedListCall) Filter(filter string) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ResourcePoliciesAggregatedListCall) MaxResults(maxResults int64) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ResourcePoliciesAggregatedListCall) OrderBy(orderBy string) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcePoliciesAggregatedListCall) Fields(s ...googleapi.Field) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResourcePoliciesAggregatedListCall) IfNoneMatch(entityTag string) *ResourcePoliciesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcePoliciesAggregatedListCall) Context(ctx context.Context) *ResourcePoliciesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
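A sketch of the conditional-fetch pattern the IfNoneMatch comment above describes, assuming the generated Service exposes a ResourcePolicies field and that the caller saved an ETag from a previous response's headers; when nothing has changed, Do returns an error that googleapi.IsNotModified recognizes:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// fetchResourcePoliciesIfChanged re-fetches the aggregated resource-policy
// list only if it has changed since the response that produced previousETag.
// The second return value reports whether a fresh list was returned.
func fetchResourcePoliciesIfChanged(ctx context.Context, svc *compute.Service, project, previousETag string) (*compute.ResourcePolicyAggregatedList, bool, error) {
	list, err := svc.ResourcePolicies.AggregatedList(project).
		IfNoneMatch(previousETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // unchanged since previousETag
	}
	if err != nil {
		return nil, false, err
	}
	return list, true, nil
}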
+func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/resourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.aggregatedList" call. +// Exactly one of *ResourcePolicyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ResourcePolicyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ResourcePolicyAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResourcePolicyAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of resource policies. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.resourcePolicies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/resourcePolicies", + // "response": { + // "$ref": "ResourcePolicyAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ResourcePoliciesAggregatedListCall) Pages(ctx context.Context, f func(*ResourcePolicyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.resourcePolicies.delete": + +type ResourcePoliciesDeleteCall struct { + s *Service + project string + region string + resourcePolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified resource policy. (== suppress_warning +// http-rest-shadowed ==) +func (r *ResourcePoliciesService) Delete(project string, region string, resourcePolicy string) *ResourcePoliciesDeleteCall { + c := &ResourcePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resourcePolicy = resourcePolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ResourcePoliciesDeleteCall) RequestId(requestId string) *ResourcePoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcePoliciesDeleteCall) Fields(s ...googleapi.Field) *ResourcePoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcePoliciesDeleteCall) Context(ctx context.Context) *ResourcePoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcePoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resourcePolicy": c.resourcePolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified resource policy. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.resourcePolicies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "resourcePolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resourcePolicy": { + // "description": "Name of the resource policy to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.resourcePolicies.get": + +type ResourcePoliciesGetCall struct { + s *Service + project string + region string + resourcePolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves all information of the specified resource policy. (== +// suppress_warning http-rest-shadowed ==) +func (r *ResourcePoliciesService) Get(project string, region string, resourcePolicy string) *ResourcePoliciesGetCall { + c := &ResourcePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resourcePolicy = resourcePolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
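The regional resource-policy delete follows the same requestId idempotency convention documented above; a brief sketch, again assuming the ResourcePolicies field on the generated Service:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// deleteResourcePolicy deletes a regional resource policy. Re-sending the
// same request ID on a retry lets the server ignore the duplicate request.
func deleteResourcePolicy(ctx context.Context, svc *compute.Service, project, region, policy, requestID string) (*compute.Operation, error) {
	return svc.ResourcePolicies.Delete(project, region, policy).
		RequestId(requestID).
		Context(ctx).
		Do()
}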
+func (c *ResourcePoliciesGetCall) Fields(s ...googleapi.Field) *ResourcePoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResourcePoliciesGetCall) IfNoneMatch(entityTag string) *ResourcePoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcePoliciesGetCall) Context(ctx context.Context) *ResourcePoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcePoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resourcePolicy": c.resourcePolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.get" call. +// Exactly one of *ResourcePolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ResourcePolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResourcePolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves all information of the specified resource policy. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.resourcePolicies.get", + // "parameterOrder": [ + // "project", + // "region", + // "resourcePolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resourcePolicy": { + // "description": "Name of the resource policy to retrieve.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + // "response": { + // "$ref": "ResourcePolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.resourcePolicies.getIamPolicy": + +type ResourcePoliciesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) +func (r *ResourcePoliciesService) GetIamPolicy(project string, region string, resource string) *ResourcePoliciesGetIamPolicyCall { + c := &ResourcePoliciesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcePoliciesGetIamPolicyCall) Fields(s ...googleapi.Field) *ResourcePoliciesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResourcePoliciesGetIamPolicyCall) IfNoneMatch(entityTag string) *ResourcePoliciesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcePoliciesGetIamPolicyCall) Context(ctx context.Context) *ResourcePoliciesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
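A sketch of a single-resource Get combined with the Fields partial-response option documented above; the field paths passed to Fields are hypothetical examples of the googleapi.Field syntax, not values taken from this diff:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getResourcePolicyPartial fetches one resource policy but asks the server to
// return only the listed fields, reducing response size.
func getResourcePolicyPartial(ctx context.Context, svc *compute.Service, project, region, policy string) (*compute.ResourcePolicy, error) {
	return svc.ResourcePolicies.Get(project, region, policy).
		Fields(googleapi.Field("name"), googleapi.Field("selfLink")). // hypothetical field paths
		Context(ctx).
		Do()
}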
+func (c *ResourcePoliciesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ResourcePoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.resourcePolicies.getIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.resourcePolicies.insert": + +type ResourcePoliciesInsertCall struct { + s *Service + project string + region string + resourcepolicy *ResourcePolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new resource policy. (== suppress_warning +// http-rest-shadowed ==) +func (r *ResourcePoliciesService) Insert(project string, region string, resourcepolicy *ResourcePolicy) *ResourcePoliciesInsertCall { + c := &ResourcePoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resourcepolicy = resourcepolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ResourcePoliciesInsertCall) RequestId(requestId string) *ResourcePoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcePoliciesInsertCall) Fields(s ...googleapi.Field) *ResourcePoliciesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ResourcePoliciesInsertCall) Context(ctx context.Context) *ResourcePoliciesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcePoliciesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcepolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new resource policy. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.resourcePolicies.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/resourcePolicies", + // "request": { + // "$ref": "ResourcePolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.resourcePolicies.list": + +type ResourcePoliciesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: A list all the resource policies that have been configured for +// the specified project in specified region. (== suppress_warning +// http-rest-shadowed ==) +func (r *ResourcePoliciesService) List(project string, region string) *ResourcePoliciesListCall { + c := &ResourcePoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *ResourcePoliciesListCall) MaxResults(maxResults int64) *ResourcePoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ResourcePoliciesListCall) OrderBy(orderBy string) *ResourcePoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcePoliciesListCall) Fields(s ...googleapi.Field) *ResourcePoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResourcePoliciesListCall) IfNoneMatch(entityTag string) *ResourcePoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcePoliciesListCall) Context(ctx context.Context) *ResourcePoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcePoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.list" call. 
+// Exactly one of *ResourcePolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ResourcePolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePolicyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ResourcePolicyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "A list all the resource policies that have been configured for the specified project in specified region. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.resourcePolicies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/resourcePolicies", + // "response": { + // "$ref": "ResourcePolicyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ResourcePoliciesListCall) Pages(ctx context.Context, f func(*ResourcePolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.resourcePolicies.setIamPolicy": + +type ResourcePoliciesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) +func (r *ResourcePoliciesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *ResourcePoliciesSetIamPolicyCall { + c := &ResourcePoliciesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcePoliciesSetIamPolicyCall) Fields(s ...googleapi.Field) *ResourcePoliciesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ResourcePoliciesSetIamPolicyCall) Context(ctx context.Context) *ResourcePoliciesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ResourcePoliciesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ResourcePoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.resourcePolicies.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.resourcePolicies.testIamPermissions": + +type ResourcePoliciesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. (== suppress_warning http-rest-shadowed ==) +func (r *ResourcePoliciesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ResourcePoliciesTestIamPermissionsCall { + c := &ResourcePoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ResourcePoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ResourcePoliciesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ResourcePoliciesTestIamPermissionsCall) Context(ctx context.Context) *ResourcePoliciesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.resourcePolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.resourcePolicies.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.routers.aggregatedList": type RoutersAggregatedListCall struct { @@ -87104,7 +105616,8 @@ type RoutersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of routers. +// AggregatedList: Retrieves an aggregated list of routers. (== +// suppress_warning http-rest-shadowed ==) func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87211,6 +105724,7 @@ func (c *RoutersAggregatedListCall) Header() http.Header { func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87272,7 +105786,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } return ret, nil // { - // "description": "Retrieves an aggregated list of routers.", + // "description": "Retrieves an aggregated list of routers. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.routers.aggregatedList", // "parameterOrder": [ @@ -87356,7 +105870,8 @@ type RoutersDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified Router resource. +// Delete: Deletes the specified Router resource. 
(== suppress_warning +// http-rest-shadowed ==) func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87411,6 +105926,7 @@ func (c *RoutersDeleteCall) Header() http.Header { func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87471,7 +105987,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Router resource.", + // "description": "Deletes the specified Router resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.routers.delete", // "parameterOrder": [ @@ -87533,7 +106049,8 @@ type RoutersGetCall struct { } // Get: Returns the specified Router resource. Gets a list of available -// routers by making a list() request. +// routers by making a list() request. (== suppress_warning +// http-rest-shadowed ==) func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87579,6 +106096,7 @@ func (c *RoutersGetCall) Header() http.Header { func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87642,7 +106160,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", + // "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.routers.get", // "parameterOrder": [ @@ -87700,7 +106218,7 @@ type RoutersGetNatMappingInfoCall struct { } // GetNatMappingInfo: Retrieves runtime Nat mapping information of VM -// endpoints. +// endpoints. (== suppress_warning http-rest-shadowed ==) func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87809,6 +106327,7 @@ func (c *RoutersGetNatMappingInfoCall) Header() http.Header { func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87872,7 +106391,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp } return ret, nil // { - // "description": "Retrieves runtime Nat mapping information of VM endpoints.", + // "description": "Retrieves runtime Nat mapping information of VM endpoints. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.routers.getNatMappingInfo", // "parameterOrder": [ @@ -87974,7 +106493,7 @@ type RoutersGetRouterStatusCall struct { } // GetRouterStatus: Retrieves runtime information of the specified -// router. +// router. 
(== suppress_warning http-rest-shadowed ==) func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88020,6 +106539,7 @@ func (c *RoutersGetRouterStatusCall) Header() http.Header { func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88083,7 +106603,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } return ret, nil // { - // "description": "Retrieves runtime information of the specified router.", + // "description": "Retrieves runtime information of the specified router. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.routers.getRouterStatus", // "parameterOrder": [ @@ -88140,7 +106660,8 @@ type RoutersInsertCall struct { } // Insert: Creates a Router resource in the specified project and region -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88195,6 +106716,7 @@ func (c *RoutersInsertCall) Header() http.Header { func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88259,7 +106781,7 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Router resource in the specified project and region using the data included in the request.", + // "description": "Creates a Router resource in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.routers.insert", // "parameterOrder": [ @@ -88315,7 +106837,7 @@ type RoutersListCall struct { } // List: Retrieves a list of Router resources available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) func (r *RoutersService) List(project string, region string) *RoutersListCall { c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88423,6 +106945,7 @@ func (c *RoutersListCall) Header() http.Header { func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88485,7 +107008,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) } return ret, nil // { - // "description": "Retrieves a list of Router resources available to the specified project.", + // "description": "Retrieves a list of Router resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.routers.list", // "parameterOrder": [ @@ -88580,7 +107103,8 @@ type RoutersPatchCall struct { // Patch: Patches the specified Router resource with the data included // in the request. 
This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. +// merge patch format and processing rules. (== suppress_warning +// http-rest-shadowed ==) func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88636,6 +107160,7 @@ func (c *RoutersPatchCall) Header() http.Header { func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88701,7 +107226,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.routers.patch", // "parameterOrder": [ @@ -88767,7 +107292,7 @@ type RoutersPreviewCall struct { // Preview: Preview fields auto-generated during router create and // update operations. Calling this method does NOT create or update the -// router. +// router. (== suppress_warning http-rest-shadowed ==) func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88804,6 +107329,7 @@ func (c *RoutersPreviewCall) Header() http.Header { func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88869,7 +107395,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } return ret, nil // { - // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.routers.preview", // "parameterOrder": [ @@ -88930,7 +107456,10 @@ type RoutersUpdateCall struct { } // Update: Updates the specified Router resource with the data included -// in the request. +// in the request. This method conforms to PUT semantics, which requests +// that the state of the target resource be created or replaced with the +// state defined by the representation enclosed in the request message +// payload. 
(== suppress_warning http-rest-shadowed ==) func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88986,6 +107515,7 @@ func (c *RoutersUpdateCall) Header() http.Header { func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89051,7 +107581,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the specified Router resource with the data included in the request.", + // "description": "Updates the specified Router resource with the data included in the request. This method conforms to PUT semantics, which requests that the state of the target resource be created or replaced with the state defined by the representation enclosed in the request message payload. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.routers.update", // "parameterOrder": [ @@ -89113,7 +107643,8 @@ type RoutesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified Route resource. +// Delete: Deletes the specified Route resource. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -89168,6 +107699,7 @@ func (c *RoutesDeleteCall) Header() http.Header { func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89227,7 +107759,7 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Route resource.", + // "description": "Deletes the specified Route resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.routes.delete", // "parameterOrder": [ @@ -89280,7 +107812,8 @@ type RoutesGetCall struct { } // Get: Returns the specified Route resource. Gets a list of available -// routes by making a list() request. +// routes by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get func (r *RoutesService) Get(project string, route string) *RoutesGetCall { c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -89326,6 +107859,7 @@ func (c *RoutesGetCall) Header() http.Header { func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89388,7 +107922,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", + // "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.routes.get", // "parameterOrder": [ @@ -89436,7 +107970,8 @@ type RoutesInsertCall struct { } // Insert: Creates a Route resource in the specified project using the -// data included in the request. +// data included in the request. (== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -89491,6 +108026,7 @@ func (c *RoutesInsertCall) Header() http.Header { func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89554,7 +108090,7 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Route resource in the specified project using the data included in the request.", + // "description": "Creates a Route resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.routes.insert", // "parameterOrder": [ @@ -89601,7 +108137,7 @@ type RoutesListCall struct { } // List: Retrieves the list of Route resources available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list func (r *RoutesService) List(project string) *RoutesListCall { c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -89709,6 +108245,7 @@ func (c *RoutesListCall) Header() http.Header { func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89770,7 +108307,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { } return ret, nil // { - // "description": "Retrieves the list of Route resources available to the specified project.", + // "description": "Retrieves the list of Route resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.routes.list", // "parameterOrder": [ @@ -89854,7 +108391,8 @@ type SecurityPoliciesAddRuleCall struct { header_ http.Header } -// AddRule: Inserts a rule into a security policy. +// AddRule: Inserts a rule into a security policy. 
(== suppress_warning +// http-rest-shadowed ==) func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesAddRuleCall { c := &SecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89890,6 +108428,7 @@ func (c *SecurityPoliciesAddRuleCall) Header() http.Header { func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89954,7 +108493,7 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Inserts a rule into a security policy.", + // "description": "Inserts a rule into a security policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.securityPolicies.addRule", // "parameterOrder": [ @@ -90003,7 +108542,8 @@ type SecurityPoliciesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified policy. +// Delete: Deletes the specified policy. (== suppress_warning +// http-rest-shadowed ==) func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90057,6 +108597,7 @@ func (c *SecurityPoliciesDeleteCall) Header() http.Header { func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90116,7 +108657,7 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified policy.", + // "description": "Deletes the specified policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.securityPolicies.delete", // "parameterOrder": [ @@ -90169,7 +108710,7 @@ type SecurityPoliciesGetCall struct { } // Get: List all of the ordered rules present in a single specified -// policy. +// policy. (== suppress_warning http-rest-shadowed ==) func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90214,6 +108755,7 @@ func (c *SecurityPoliciesGetCall) Header() http.Header { func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90276,7 +108818,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } return ret, nil // { - // "description": "List all of the ordered rules present in a single specified policy.", + // "description": "List all of the ordered rules present in a single specified policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.securityPolicies.get", // "parameterOrder": [ @@ -90324,7 +108866,8 @@ type SecurityPoliciesGetRuleCall struct { header_ http.Header } -// GetRule: Gets a rule at the specified priority. +// GetRule: Gets a rule at the specified priority. 
(== suppress_warning +// http-rest-shadowed ==) func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90376,6 +108919,7 @@ func (c *SecurityPoliciesGetRuleCall) Header() http.Header { func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90438,7 +108982,7 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit } return ret, nil // { - // "description": "Gets a rule at the specified priority.", + // "description": "Gets a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.securityPolicies.getRule", // "parameterOrder": [ @@ -90492,7 +109036,7 @@ type SecurityPoliciesInsertCall struct { } // Insert: Creates a new policy in the specified project using the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90546,6 +109090,7 @@ func (c *SecurityPoliciesInsertCall) Header() http.Header { func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90609,7 +109154,7 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request.", + // "description": "Creates a new policy in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.securityPolicies.insert", // "parameterOrder": [ @@ -90656,7 +109201,7 @@ type SecurityPoliciesListCall struct { } // List: List all the policies that have been configured for the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90763,6 +109308,7 @@ func (c *SecurityPoliciesListCall) Header() http.Header { func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90824,7 +109370,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo } return ret, nil // { - // "description": "List all the policies that have been configured for the specified project.", + // "description": "List all the policies that have been configured for the specified project. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.securityPolicies.list", // "parameterOrder": [ @@ -90909,7 +109455,7 @@ type SecurityPoliciesPatchCall struct { } // Patch: Patches the specified policy with the data included in the -// request. +// request. (== suppress_warning http-rest-shadowed ==) func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90964,6 +109510,7 @@ func (c *SecurityPoliciesPatchCall) Header() http.Header { func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91028,7 +109575,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request.", + // "description": "Patches the specified policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.securityPolicies.patch", // "parameterOrder": [ @@ -91083,7 +109630,8 @@ type SecurityPoliciesPatchRuleCall struct { header_ http.Header } -// PatchRule: Patches a rule at the specified priority. +// PatchRule: Patches a rule at the specified priority. (== +// suppress_warning http-rest-shadowed ==) func (r *SecurityPoliciesService) PatchRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesPatchRuleCall { c := &SecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91126,6 +109674,7 @@ func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91190,7 +109739,7 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Patches a rule at the specified priority.", + // "description": "Patches a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.securityPolicies.patchRule", // "parameterOrder": [ @@ -91245,7 +109794,8 @@ type SecurityPoliciesRemoveRuleCall struct { header_ http.Header } -// RemoveRule: Deletes a rule at the specified priority. +// RemoveRule: Deletes a rule at the specified priority. 
(== +// suppress_warning http-rest-shadowed ==) func (r *SecurityPoliciesService) RemoveRule(project string, securityPolicy string) *SecurityPoliciesRemoveRuleCall { c := &SecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91287,6 +109837,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91346,7 +109897,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Deletes a rule at the specified priority.", + // "description": "Deletes a rule at the specified priority. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.securityPolicies.removeRule", // "parameterOrder": [ @@ -91404,7 +109955,8 @@ type SnapshotsDeleteCall struct { // deletion is needed for subsequent snapshots, the data will be moved // to the next corresponding snapshot. // -// For more information, see Deleting snapshots. +// For more information, see Deleting snapshots. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -91459,6 +110011,7 @@ func (c *SnapshotsDeleteCall) Header() http.Header { func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91518,7 +110071,7 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.snapshots.delete", // "parameterOrder": [ @@ -91571,7 +110124,8 @@ type SnapshotsGetCall struct { } // Get: Returns the specified Snapshot resource. Gets a list of -// available snapshots by making a list() request. +// available snapshots by making a list() request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -91617,6 +110171,7 @@ func (c *SnapshotsGetCall) Header() http.Header { func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91679,7 +110234,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } return ret, nil // { - // "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", + // "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.snapshots.get", // "parameterOrder": [ @@ -91728,7 +110283,8 @@ type SnapshotsGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) func (r *SnapshotsService) GetIamPolicy(project string, resource string) *SnapshotsGetIamPolicyCall { c := &SnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91773,6 +110329,7 @@ func (c *SnapshotsGetIamPolicyCall) Header() http.Header { func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91835,7 +110392,7 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.snapshots.getIamPolicy", // "parameterOrder": [ @@ -91883,7 +110440,7 @@ type SnapshotsListCall struct { } // List: Retrieves the list of Snapshot resources contained within the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list func (r *SnapshotsService) List(project string) *SnapshotsListCall { c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -91991,6 +110548,7 @@ func (c *SnapshotsListCall) Header() http.Header { func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92052,7 +110610,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err } return ret, nil // { - // "description": "Retrieves the list of Snapshot resources contained within the specified project.", + // "description": "Retrieves the list of Snapshot resources contained within the specified project. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.snapshots.list", // "parameterOrder": [ @@ -92137,7 +110695,8 @@ type SnapshotsSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. (== suppress_warning +// http-rest-shadowed ==) func (r *SnapshotsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *SnapshotsSetIamPolicyCall { c := &SnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92173,6 +110732,7 @@ func (c *SnapshotsSetIamPolicyCall) Header() http.Header { func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92237,7 +110797,7 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.snapshots.setIamPolicy", // "parameterOrder": [ @@ -92288,7 +110848,8 @@ type SnapshotsSetLabelsCall struct { } // SetLabels: Sets the labels on a snapshot. To learn more about labels, -// read the Labeling Resources documentation. +// read the Labeling Resources documentation. (== suppress_warning +// http-rest-shadowed ==) func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92324,6 +110885,7 @@ func (c *SnapshotsSetLabelsCall) Header() http.Header { func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92388,7 +110950,7 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.snapshots.setLabels", // "parameterOrder": [ @@ -92439,7 +111001,7 @@ type SnapshotsTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. 
(== suppress_warning http-rest-shadowed ==) func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92475,6 +111037,7 @@ func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92539,7 +111102,7 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.snapshots.testIamPermissions", // "parameterOrder": [ @@ -92578,6 +111141,260 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } +// method id "compute.sslCertificates.aggregatedList": + +type SslCertificatesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all SslCertificate resources, +// regional and global, available to the specified project. (== +// suppress_warning http-rest-shadowed ==) +func (r *SslCertificatesService) AggregatedList(project string) *SslCertificatesAggregatedListCall { + c := &SslCertificatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *SslCertificatesAggregatedListCall) Filter(filter string) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SslCertificatesAggregatedListCall) MaxResults(maxResults int64) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SslCertificatesAggregatedListCall) OrderBy(orderBy string) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslCertificatesAggregatedListCall) PageToken(pageToken string) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesAggregatedListCall) Fields(s ...googleapi.Field) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslCertificatesAggregatedListCall) IfNoneMatch(entityTag string) *SslCertificatesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertificatesAggregatedListCall) Context(ctx context.Context) *SslCertificatesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslCertificatesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/sslCertificates") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslCertificates.aggregatedList" call. +// Exactly one of *SslCertificateAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *SslCertificateAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SslCertificateAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertificateAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/sslCertificates", + // "response": { + // "$ref": "SslCertificateAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SslCertificatesAggregatedListCall) Pages(ctx context.Context, f func(*SslCertificateAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.sslCertificates.delete": type SslCertificatesDeleteCall struct { @@ -92589,7 +111406,8 @@ type SslCertificatesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified SslCertificate resource. +// Delete: Deletes the specified SslCertificate resource. (== +// suppress_warning http-rest-shadowed ==) func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92643,6 +111461,7 @@ func (c *SslCertificatesDeleteCall) Header() http.Header { func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92702,7 +111521,7 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource.", + // "description": "Deletes the specified SslCertificate resource. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.sslCertificates.delete", // "parameterOrder": [ @@ -92755,7 +111574,8 @@ type SslCertificatesGetCall struct { } // Get: Returns the specified SslCertificate resource. Gets a list of -// available SSL certificates by making a list() request. +// available SSL certificates by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92800,6 +111620,7 @@ func (c *SslCertificatesGetCall) Header() http.Header { func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92862,7 +111683,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } return ret, nil // { - // "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", + // "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.sslCertificates.get", // "parameterOrder": [ @@ -92910,7 +111731,8 @@ type SslCertificatesInsertCall struct { } // Insert: Creates a SslCertificate resource in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -92964,6 +111786,7 @@ func (c *SslCertificatesInsertCall) Header() http.Header { func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93027,7 +111850,7 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + // "description": "Creates a SslCertificate resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.sslCertificates.insert", // "parameterOrder": [ @@ -93074,7 +111897,7 @@ type SslCertificatesListCall struct { } // List: Retrieves the list of SslCertificate resources available to the -// specified project. +// specified project. 
(== suppress_warning http-rest-shadowed ==) func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93181,6 +112004,7 @@ func (c *SslCertificatesListCall) Header() http.Header { func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93242,7 +112066,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project.", + // "description": "Retrieves the list of SslCertificate resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.sslCertificates.list", // "parameterOrder": [ @@ -93327,7 +112151,7 @@ type SslPoliciesDeleteCall struct { // Delete: Deletes the specified SSL policy. The SSL policy resource can // be deleted only if it is not in use by any TargetHttpsProxy or -// TargetSslProxy resources. +// TargetSslProxy resources. (== suppress_warning http-rest-shadowed ==) func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93381,6 +112205,7 @@ func (c *SslPoliciesDeleteCall) Header() http.Header { func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93440,7 +112265,7 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.sslPolicies.delete", // "parameterOrder": [ @@ -93492,7 +112317,7 @@ type SslPoliciesGetCall struct { } // Get: Lists all of the ordered rules present in a single specified -// policy. +// policy. (== suppress_warning http-rest-shadowed ==) func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93537,6 +112362,7 @@ func (c *SslPoliciesGetCall) Header() http.Header { func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93599,7 +112425,7 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error } return ret, nil // { - // "description": "Lists all of the ordered rules present in a single specified policy.", + // "description": "Lists all of the ordered rules present in a single specified policy. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.sslPolicies.get", // "parameterOrder": [ @@ -93646,7 +112472,8 @@ type SslPoliciesInsertCall struct { } // Insert: Returns the specified SSL policy resource. Gets a list of -// available SSL policies by making a list() request. +// available SSL policies by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { c := &SslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93700,6 +112527,7 @@ func (c *SslPoliciesInsertCall) Header() http.Header { func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93763,7 +112591,7 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", + // "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.sslPolicies.insert", // "parameterOrder": [ @@ -93810,7 +112638,7 @@ type SslPoliciesListCall struct { } // List: Lists all the SSL policies that have been configured for the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { c := &SslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93917,6 +112745,7 @@ func (c *SslPoliciesListCall) Header() http.Header { func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93978,7 +112807,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList } return ret, nil // { - // "description": "Lists all the SSL policies that have been configured for the specified project.", + // "description": "Lists all the SSL policies that have been configured for the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.sslPolicies.list", // "parameterOrder": [ @@ -94062,7 +112891,8 @@ type SslPoliciesListAvailableFeaturesCall struct { } // ListAvailableFeatures: Lists all features that can be specified in -// the SSL policy when using custom profile. +// the SSL policy when using custom profile. 
(== suppress_warning +// http-rest-shadowed ==) func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94169,6 +112999,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94232,7 +113063,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "description": "Lists all features that can be specified in the SSL policy when using custom profile. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.sslPolicies.listAvailableFeatures", // "parameterOrder": [ @@ -94296,7 +113127,7 @@ type SslPoliciesPatchCall struct { } // Patch: Patches the specified SSL policy with the data included in the -// request. +// request. (== suppress_warning http-rest-shadowed ==) func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94351,6 +113182,7 @@ func (c *SslPoliciesPatchCall) Header() http.Header { func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94415,7 +113247,7 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Patches the specified SSL policy with the data included in the request.", + // "description": "Patches the specified SSL policy with the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.sslPolicies.patch", // "parameterOrder": [ @@ -94468,7 +113300,8 @@ type SubnetworksAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of subnetworks. +// AggregatedList: Retrieves an aggregated list of subnetworks. (== +// suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94575,6 +113408,7 @@ func (c *SubnetworksAggregatedListCall) Header() http.Header { func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94636,7 +113470,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne } return ret, nil // { - // "description": "Retrieves an aggregated list of subnetworks.", + // "description": "Retrieves an aggregated list of subnetworks. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.subnetworks.aggregatedList", // "parameterOrder": [ @@ -94720,7 +113554,8 @@ type SubnetworksDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified subnetwork. +// Delete: Deletes the specified subnetwork. (== suppress_warning +// http-rest-shadowed ==) func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94775,6 +113610,7 @@ func (c *SubnetworksDeleteCall) Header() http.Header { func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94835,7 +113671,7 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified subnetwork.", + // "description": "Deletes the specified subnetwork. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.subnetworks.delete", // "parameterOrder": [ @@ -94897,7 +113733,7 @@ type SubnetworksExpandIpCidrRangeCall struct { } // ExpandIpCidrRange: Expands the IP CIDR range of the subnetwork to a -// specified value. +// specified value. (== suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, subnetwork string, subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest) *SubnetworksExpandIpCidrRangeCall { c := &SubnetworksExpandIpCidrRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94953,6 +113789,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95018,7 +113855,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Expands the IP CIDR range of the subnetwork to a specified value.", + // "description": "Expands the IP CIDR range of the subnetwork to a specified value. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.subnetworks.expandIpCidrRange", // "parameterOrder": [ @@ -95083,7 +113920,8 @@ type SubnetworksGetCall struct { } // Get: Returns the specified subnetwork. Gets a list of available -// subnetworks list() request. +// subnetworks list() request. (== suppress_warning http-rest-shadowed +// ==) func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95129,6 +113967,7 @@ func (c *SubnetworksGetCall) Header() http.Header { func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95192,7 +114031,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } return ret, nil // { - // "description": "Returns the specified subnetwork. 
Gets a list of available subnetworks list() request.", + // "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.subnetworks.get", // "parameterOrder": [ @@ -95250,7 +114089,8 @@ type SubnetworksGetIamPolicyCall struct { } // GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. +// empty if no such policy or resource exists. (== suppress_warning +// http-rest-shadowed ==) func (r *SubnetworksService) GetIamPolicy(project string, region string, resource string) *SubnetworksGetIamPolicyCall { c := &SubnetworksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95296,6 +114136,7 @@ func (c *SubnetworksGetIamPolicyCall) Header() http.Header { func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95359,7 +114200,7 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.subnetworks.getIamPolicy", // "parameterOrder": [ @@ -95416,7 +114257,7 @@ type SubnetworksInsertCall struct { } // Insert: Creates a subnetwork in the specified project using the data -// included in the request. +// included in the request. (== suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95471,6 +114312,7 @@ func (c *SubnetworksInsertCall) Header() http.Header { func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95535,7 +114377,7 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a subnetwork in the specified project using the data included in the request.", + // "description": "Creates a subnetwork in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.subnetworks.insert", // "parameterOrder": [ @@ -95591,7 +114433,7 @@ type SubnetworksListCall struct { } // List: Retrieves a list of subnetworks available to the specified -// project. +// project. 
(== suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95699,6 +114541,7 @@ func (c *SubnetworksListCall) Header() http.Header { func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95761,7 +114604,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, } return ret, nil // { - // "description": "Retrieves a list of subnetworks available to the specified project.", + // "description": "Retrieves a list of subnetworks available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.subnetworks.list", // "parameterOrder": [ @@ -95852,7 +114695,10 @@ type SubnetworksListUsableCall struct { header_ http.Header } -// ListUsable: Retrieves an aggregated list of usable subnetworks. +// ListUsable: Retrieves an aggregated list of all usable subnetworks in +// the project. The list contains all of the subnetworks in the project +// and the subnetworks that were shared by a Shared VPC host project. +// (== suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95959,6 +114805,7 @@ func (c *SubnetworksListUsableCall) Header() http.Header { func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96020,7 +114867,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub } return ret, nil // { - // "description": "Retrieves an aggregated list of usable subnetworks.", + // "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.subnetworks.listUsable", // "parameterOrder": [ @@ -96108,7 +114955,8 @@ type SubnetworksPatchCall struct { // Patch: Patches the specified subnetwork with the data included in the // request. Only certain fields can up updated with a patch request as // indicated in the field descriptions. You must specify the current -// fingeprint of the subnetwork resource being patched. +// fingeprint of the subnetwork resource being patched. (== +// suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) Patch(project string, region string, subnetwork string, subnetwork2 *Subnetwork) *SubnetworksPatchCall { c := &SubnetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96118,6 +114966,21 @@ func (r *SubnetworksService) Patch(project string, region string, subnetwork str return c } +// DrainTimeoutSeconds sets the optional parameter +// "drainTimeoutSeconds": The drain timeout specifies the upper bound in +// seconds on the amount of time allowed to drain connections from the +// current ACTIVE subnetwork to the current BACKUP subnetwork. 
The drain +// timeout is only applicable when the following conditions are true: - +// the subnetwork being patched has purpose = +// INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role +// = BACKUP - the patch request is setting the role to ACTIVE. Note that +// after this patch operation the roles of the ACTIVE and BACKUP +// subnetworks will be swapped. +func (c *SubnetworksPatchCall) DrainTimeoutSeconds(drainTimeoutSeconds int64) *SubnetworksPatchCall { + c.urlParams_.Set("drainTimeoutSeconds", fmt.Sprint(drainTimeoutSeconds)) + return c +} + // RequestId sets the optional parameter "requestId": An optional // request ID to identify requests. Specify a unique request ID so that // if you must retry your request, the server will know to ignore the @@ -96164,6 +115027,7 @@ func (c *SubnetworksPatchCall) Header() http.Header { func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96229,7 +115093,7 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched.", + // "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.subnetworks.patch", // "parameterOrder": [ @@ -96238,6 +115102,12 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "subnetwork" // ], // "parameters": { + // "drainTimeoutSeconds": { + // "description": "The drain timeout specifies the upper bound in seconds on the amount of time allowed to drain connections from the current ACTIVE subnetwork to the current BACKUP subnetwork. The drain timeout is only applicable when the following conditions are true: - the subnetwork being patched has purpose = INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role = BACKUP - the patch request is setting the role to ACTIVE. Note that after this patch operation the roles of the ACTIVE and BACKUP subnetworks will be swapped.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -96294,7 +115164,8 @@ type SubnetworksSetIamPolicyCall struct { } // SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. +// resource. Replaces any existing policy. 
(== suppress_warning +// http-rest-shadowed ==) func (r *SubnetworksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *SubnetworksSetIamPolicyCall { c := &SubnetworksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96331,6 +115202,7 @@ func (c *SubnetworksSetIamPolicyCall) Header() http.Header { func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96396,7 +115268,7 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.subnetworks.setIamPolicy", // "parameterOrder": [ @@ -96457,7 +115329,7 @@ type SubnetworksSetPrivateIpGoogleAccessCall struct { // SetPrivateIpGoogleAccess: Set whether VMs in this subnet can access // Google services without assigning external IP addresses through -// Private Google Access. +// Private Google Access. (== suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) SetPrivateIpGoogleAccess(project string, region string, subnetwork string, subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest) *SubnetworksSetPrivateIpGoogleAccessCall { c := &SubnetworksSetPrivateIpGoogleAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96513,6 +115385,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96578,7 +115451,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", + // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.subnetworks.setPrivateIpGoogleAccess", // "parameterOrder": [ @@ -96643,7 +115516,7 @@ type SubnetworksTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// specified resource. 
(== suppress_warning http-rest-shadowed ==) func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96680,6 +115553,7 @@ func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96745,7 +115619,7 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.subnetworks.testIamPermissions", // "parameterOrder": [ @@ -96792,6 +115666,260 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } +// method id "compute.targetHttpProxies.aggregatedList": + +type TargetHttpProxiesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all TargetHttpProxy resources, +// regional and global, available to the specified project. (== +// suppress_warning http-rest-shadowed ==) +func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpProxiesAggregatedListCall { + c := &TargetHttpProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetHttpProxiesAggregatedListCall) Filter(filter string) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *TargetHttpProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetHttpProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetHttpProxiesAggregatedListCall) PageToken(pageToken string) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesAggregatedListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetHttpProxiesAggregatedListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesAggregatedListCall) Context(ctx context.Context) *TargetHttpProxiesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetHttpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpProxies.aggregatedList" call. +// Exactly one of *TargetHttpProxyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetHttpProxyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpProxyAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.targetHttpProxies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetHttpProxies", + // "response": { + // "$ref": "TargetHttpProxyAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetHttpProxiesAggregatedListCall) Pages(ctx context.Context, f func(*TargetHttpProxyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.targetHttpProxies.delete": type TargetHttpProxiesDeleteCall struct { @@ -96803,7 +115931,8 @@ type TargetHttpProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetHttpProxy resource. +// Delete: Deletes the specified TargetHttpProxy resource. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -96858,6 +115987,7 @@ func (c *TargetHttpProxiesDeleteCall) Header() http.Header { func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96917,7 +116047,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified TargetHttpProxy resource.", + // "description": "Deletes the specified TargetHttpProxy resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.targetHttpProxies.delete", // "parameterOrder": [ @@ -96970,7 +116100,8 @@ type TargetHttpProxiesGetCall struct { } // Get: Returns the specified TargetHttpProxy resource. Gets a list of -// available target HTTP proxies by making a list() request. +// available target HTTP proxies by making a list() request. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -97016,6 +116147,7 @@ func (c *TargetHttpProxiesGetCall) Header() http.Header { func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97078,7 +116210,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetHttpProxies.get", // "parameterOrder": [ @@ -97126,7 +116258,8 @@ type TargetHttpProxiesInsertCall struct { } // Insert: Creates a TargetHttpProxy resource in the specified project -// using the data included in the request. +// using the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -97181,6 +116314,7 @@ func (c *TargetHttpProxiesInsertCall) Header() http.Header { func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97244,7 +116378,7 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetHttpProxies.insert", // "parameterOrder": [ @@ -97291,7 +116425,7 @@ type TargetHttpProxiesListCall struct { } // List: Retrieves the list of TargetHttpProxy resources available to -// the specified project. +// the specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -97399,6 +116533,7 @@ func (c *TargetHttpProxiesListCall) Header() http.Header { func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97460,7 +116595,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetHttpProxies.list", // "parameterOrder": [ @@ -97544,7 +116679,8 @@ type TargetHttpProxiesSetUrlMapCall struct { header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpProxy. +// SetUrlMap: Changes the URL map for TargetHttpProxy. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -97600,6 +116736,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97664,7 +116801,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the URL map for TargetHttpProxy.", + // "description": "Changes the URL map for TargetHttpProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetHttpProxies.setUrlMap", // "parameterOrder": [ @@ -97707,6 +116844,260 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.targetHttpsProxies.aggregatedList": + +type TargetHttpsProxiesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all TargetHttpsProxy resources, +// regional and global, available to the specified project. (== +// suppress_warning http-rest-shadowed ==) +func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsProxiesAggregatedListCall { + c := &TargetHttpsProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetHttpsProxiesAggregatedListCall) Filter(filter string) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *TargetHttpsProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetHttpsProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetHttpsProxiesAggregatedListCall) PageToken(pageToken string) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesAggregatedListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetHttpsProxiesAggregatedListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesAggregatedListCall) Context(ctx context.Context) *TargetHttpsProxiesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
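A separate sketch (again, not part of the generated file) of the conditional-request pattern the IfNoneMatch comment above describes: re-issue the list with an ETag saved from an earlier response and treat a 304 as "nothing changed" via googleapi.IsNotModified. The project ID and saved ETag are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	const project = "my-project"        // placeholder project ID
	const savedETag = `"previous-etag"` // placeholder ETag from an earlier response header

	list, err := svc.TargetHttpsProxies.AggregatedList(project).
		IfNoneMatch(savedETag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("not modified since last fetch; reuse the cached result")
	case err != nil:
		log.Fatalf("AggregatedList: %v", err)
	default:
		fmt.Printf("fetched %d scopes\n", len(list.Items))
	}
}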
+func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetHttpsProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpsProxies.aggregatedList" call. +// Exactly one of *TargetHttpsProxyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetHttpsProxyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpsProxyAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetHttpsProxies", + // "response": { + // "$ref": "TargetHttpsProxyAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetHttpsProxiesAggregatedListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.targetHttpsProxies.delete": type TargetHttpsProxiesDeleteCall struct { @@ -97718,7 +117109,8 @@ type TargetHttpsProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetHttpsProxy resource. +// Delete: Deletes the specified TargetHttpsProxy resource. 
(== +// suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97772,6 +117164,7 @@ func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97831,7 +117224,7 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified TargetHttpsProxy resource.", + // "description": "Deletes the specified TargetHttpsProxy resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.targetHttpsProxies.delete", // "parameterOrder": [ @@ -97884,7 +117277,8 @@ type TargetHttpsProxiesGetCall struct { } // Get: Returns the specified TargetHttpsProxy resource. Gets a list of -// available target HTTPS proxies by making a list() request. +// available target HTTPS proxies by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97929,6 +117323,7 @@ func (c *TargetHttpsProxiesGetCall) Header() http.Header { func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97991,7 +117386,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetHttpsProxies.get", // "parameterOrder": [ @@ -98039,7 +117434,8 @@ type TargetHttpsProxiesInsertCall struct { } // Insert: Creates a TargetHttpsProxy resource in the specified project -// using the data included in the request. +// using the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98093,6 +117489,7 @@ func (c *TargetHttpsProxiesInsertCall) Header() http.Header { func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98156,7 +117553,7 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.insert", // "parameterOrder": [ @@ -98203,7 +117600,7 @@ type TargetHttpsProxiesListCall struct { } // List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project. +// the specified project. (== suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98310,6 +117707,7 @@ func (c *TargetHttpsProxiesListCall) Header() http.Header { func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98371,7 +117769,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetHttpsProxies.list", // "parameterOrder": [ @@ -98456,6 +117854,7 @@ type TargetHttpsProxiesSetQuicOverrideCall struct { } // SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. +// (== suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98510,6 +117909,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98574,7 +117974,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the QUIC override policy for TargetHttpsProxy.", + // "description": "Sets the QUIC override policy for TargetHttpsProxy. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setQuicOverride", // "parameterOrder": [ @@ -98629,6 +118029,7 @@ type TargetHttpsProxiesSetSslCertificatesCall struct { } // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +// (== suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98683,6 +118084,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98747,7 +118149,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Replaces SslCertificates for TargetHttpsProxy.", + // "description": "Replaces SslCertificates for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setSslCertificates", // "parameterOrder": [ @@ -98806,7 +118208,7 @@ type TargetHttpsProxiesSetSslPolicyCall struct { // policy specifies the server-side support for SSL features. This // affects connections between clients and the HTTPS proxy load // balancer. They do not affect the connection between the load balancer -// and the backends. +// and the backends. (== suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -98861,6 +118263,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98925,7 +118328,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setSslPolicy", // "parameterOrder": [ @@ -98979,7 +118382,8 @@ type TargetHttpsProxiesSetUrlMapCall struct { header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpsProxy. +// SetUrlMap: Changes the URL map for TargetHttpsProxy. 
(== +// suppress_warning http-rest-shadowed ==) func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -99034,6 +118438,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99098,7 +118503,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy.", + // "description": "Changes the URL map for TargetHttpsProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetHttpsProxies.setUrlMap", // "parameterOrder": [ @@ -99152,7 +118557,8 @@ type TargetInstancesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target instances. +// AggregatedList: Retrieves an aggregated list of target instances. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -99260,6 +118666,7 @@ func (c *TargetInstancesAggregatedListCall) Header() http.Header { func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99321,7 +118728,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Retrieves an aggregated list of target instances.", + // "description": "Retrieves an aggregated list of target instances. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetInstances.aggregatedList", // "parameterOrder": [ @@ -99405,7 +118812,8 @@ type TargetInstancesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetInstance resource. +// Delete: Deletes the specified TargetInstance resource. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -99461,6 +118869,7 @@ func (c *TargetInstancesDeleteCall) Header() http.Header { func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99521,7 +118930,7 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified TargetInstance resource.", + // "description": "Deletes the specified TargetInstance resource. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.targetInstances.delete", // "parameterOrder": [ @@ -99583,7 +118992,8 @@ type TargetInstancesGetCall struct { } // Get: Returns the specified TargetInstance resource. Gets a list of -// available target instances by making a list() request. +// available target instances by making a list() request. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -99630,6 +119040,7 @@ func (c *TargetInstancesGetCall) Header() http.Header { func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99693,7 +119104,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } return ret, nil // { - // "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", + // "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetInstances.get", // "parameterOrder": [ @@ -99750,7 +119161,8 @@ type TargetInstancesInsertCall struct { } // Insert: Creates a TargetInstance resource in the specified project -// and zone using the data included in the request. +// and zone using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -99806,6 +119218,7 @@ func (c *TargetInstancesInsertCall) Header() http.Header { func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99870,7 +119283,7 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetInstances.insert", // "parameterOrder": [ @@ -99926,7 +119339,8 @@ type TargetInstancesListCall struct { } // List: Retrieves a list of TargetInstance resources available to the -// specified project and zone. +// specified project and zone. 
(== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -100035,6 +119449,7 @@ func (c *TargetInstancesListCall) Header() http.Header { func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100097,7 +119512,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta } return ret, nil // { - // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetInstances.list", // "parameterOrder": [ @@ -100190,7 +119605,8 @@ type TargetPoolsAddHealthCheckCall struct { header_ http.Header } -// AddHealthCheck: Adds health check URLs to a target pool. +// AddHealthCheck: Adds health check URLs to a target pool. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -100247,6 +119663,7 @@ func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100312,7 +119729,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Adds health check URLs to a target pool.", + // "description": "Adds health check URLs to a target pool. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetPools.addHealthCheck", // "parameterOrder": [ @@ -100376,7 +119793,8 @@ type TargetPoolsAddInstanceCall struct { header_ http.Header } -// AddInstance: Adds an instance to a target pool. +// AddInstance: Adds an instance to a target pool. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -100433,6 +119851,7 @@ func (c *TargetPoolsAddInstanceCall) Header() http.Header { func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100498,7 +119917,7 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Adds an instance to a target pool.", + // "description": "Adds an instance to a target pool. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetPools.addInstance", // "parameterOrder": [ @@ -100560,7 +119979,8 @@ type TargetPoolsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target pools. +// AggregatedList: Retrieves an aggregated list of target pools. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -100668,6 +120088,7 @@ func (c *TargetPoolsAggregatedListCall) Header() http.Header { func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100729,7 +120150,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe } return ret, nil // { - // "description": "Retrieves an aggregated list of target pools.", + // "description": "Retrieves an aggregated list of target pools. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetPools.aggregatedList", // "parameterOrder": [ @@ -100813,7 +120234,8 @@ type TargetPoolsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified target pool. +// Delete: Deletes the specified target pool. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -100869,6 +120291,7 @@ func (c *TargetPoolsDeleteCall) Header() http.Header { func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100929,7 +120352,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified target pool.", + // "description": "Deletes the specified target pool. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.targetPools.delete", // "parameterOrder": [ @@ -100991,7 +120414,8 @@ type TargetPoolsGetCall struct { } // Get: Returns the specified target pool. Gets a list of available -// target pools by making a list() request. +// target pools by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -101038,6 +120462,7 @@ func (c *TargetPoolsGetCall) Header() http.Header { func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101101,7 +120526,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro } return ret, nil // { - // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", + // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetPools.get", // "parameterOrder": [ @@ -101159,7 +120584,8 @@ type TargetPoolsGetHealthCall struct { } // GetHealth: Gets the most recent health check results for each IP for -// the instance that is referenced by the given target pool. +// the instance that is referenced by the given target pool. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -101197,6 +120623,7 @@ func (c *TargetPoolsGetHealthCall) Header() http.Header { func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101262,7 +120689,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool } return ret, nil // { - // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", + // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetPools.getHealth", // "parameterOrder": [ @@ -101322,7 +120749,8 @@ type TargetPoolsInsertCall struct { } // Insert: Creates a target pool in the specified project and region -// using the data included in the request. +// using the data included in the request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -101378,6 +120806,7 @@ func (c *TargetPoolsInsertCall) Header() http.Header { func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101442,7 +120871,7 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a target pool in the specified project and region using the data included in the request.", + // "description": "Creates a target pool in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetPools.insert", // "parameterOrder": [ @@ -101498,7 +120927,7 @@ type TargetPoolsListCall struct { } // List: Retrieves a list of target pools available to the specified -// project and region. +// project and region. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -101607,6 +121036,7 @@ func (c *TargetPoolsListCall) Header() http.Header { func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101669,7 +121099,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, } return ret, nil // { - // "description": "Retrieves a list of target pools available to the specified project and region.", + // "description": "Retrieves a list of target pools available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetPools.list", // "parameterOrder": [ @@ -101762,7 +121192,8 @@ type TargetPoolsRemoveHealthCheckCall struct { header_ http.Header } -// RemoveHealthCheck: Removes health check URL from a target pool. +// RemoveHealthCheck: Removes health check URL from a target pool. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -101819,6 +121250,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101884,7 +121316,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Removes health check URL from a target pool.", + // "description": "Removes health check URL from a target pool. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetPools.removeHealthCheck", // "parameterOrder": [ @@ -101948,7 +121380,8 @@ type TargetPoolsRemoveInstanceCall struct { header_ http.Header } -// RemoveInstance: Removes instance URL from a target pool. +// RemoveInstance: Removes instance URL from a target pool. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -102005,6 +121438,7 @@ func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102070,7 +121504,7 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Removes instance URL from a target pool.", + // "description": "Removes instance URL from a target pool. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetPools.removeInstance", // "parameterOrder": [ @@ -102134,7 +121568,8 @@ type TargetPoolsSetBackupCall struct { header_ http.Header } -// SetBackup: Changes a backup target pool's configurations. +// SetBackup: Changes a backup target pool's configurations. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -102198,6 +121633,7 @@ func (c *TargetPoolsSetBackupCall) Header() http.Header { func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102263,7 +121699,7 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Changes a backup target pool's configurations.", + // "description": "Changes a backup target pool's configurations. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetPools.setBackup", // "parameterOrder": [ @@ -102331,7 +121767,8 @@ type TargetSslProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetSslProxy resource. +// Delete: Deletes the specified TargetSslProxy resource. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) *TargetSslProxiesDeleteCall { c := &TargetSslProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102385,6 +121822,7 @@ func (c *TargetSslProxiesDeleteCall) Header() http.Header { func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102444,7 +121882,7 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified TargetSslProxy resource.", + // "description": "Deletes the specified TargetSslProxy resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.targetSslProxies.delete", // "parameterOrder": [ @@ -102497,7 +121935,8 @@ type TargetSslProxiesGetCall struct { } // Get: Returns the specified TargetSslProxy resource. Gets a list of -// available target SSL proxies by making a list() request. +// available target SSL proxies by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) Get(project string, targetSslProxy string) *TargetSslProxiesGetCall { c := &TargetSslProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102542,6 +121981,7 @@ func (c *TargetSslProxiesGetCall) Header() http.Header { func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102604,7 +122044,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr } return ret, nil // { - // "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", + // "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetSslProxies.get", // "parameterOrder": [ @@ -102652,7 +122092,8 @@ type TargetSslProxiesInsertCall struct { } // Insert: Creates a TargetSslProxy resource in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetSslProxy) *TargetSslProxiesInsertCall { c := &TargetSslProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102706,6 +122147,7 @@ func (c *TargetSslProxiesInsertCall) Header() http.Header { func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102769,7 +122211,7 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", + // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetSslProxies.insert", // "parameterOrder": [ @@ -102816,7 +122258,7 @@ type TargetSslProxiesListCall struct { } // List: Retrieves the list of TargetSslProxy resources available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall { c := &TargetSslProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102923,6 +122365,7 @@ func (c *TargetSslProxiesListCall) Header() http.Header { func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102984,7 +122427,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP } return ret, nil // { - // "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", + // "description": "Retrieves the list of TargetSslProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetSslProxies.list", // "parameterOrder": [ @@ -103068,7 +122511,8 @@ type TargetSslProxiesSetBackendServiceCall struct { header_ http.Header } -// SetBackendService: Changes the BackendService for TargetSslProxy. +// SetBackendService: Changes the BackendService for TargetSslProxy. 
(== +// suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103123,6 +122567,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103187,7 +122632,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the BackendService for TargetSslProxy.", + // "description": "Changes the BackendService for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setBackendService", // "parameterOrder": [ @@ -103242,7 +122687,8 @@ type TargetSslProxiesSetProxyHeaderCall struct { header_ http.Header } -// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. +// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103297,6 +122743,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103361,7 +122808,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes the ProxyHeaderType for TargetSslProxy.", + // "description": "Changes the ProxyHeaderType for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setProxyHeader", // "parameterOrder": [ @@ -103416,7 +122863,8 @@ type TargetSslProxiesSetSslCertificatesCall struct { header_ http.Header } -// SetSslCertificates: Changes SslCertificates for TargetSslProxy. +// SetSslCertificates: Changes SslCertificates for TargetSslProxy. 
(== +// suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103471,6 +122919,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103535,7 +122984,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Changes SslCertificates for TargetSslProxy.", + // "description": "Changes SslCertificates for TargetSslProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslCertificates", // "parameterOrder": [ @@ -103594,6 +123043,7 @@ type TargetSslProxiesSetSslPolicyCall struct { // specifies the server-side support for SSL features. This affects // connections between clients and the SSL proxy load balancer. They do // not affect the connection between the load balancer and the backends. +// (== suppress_warning http-rest-shadowed ==) func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103648,6 +123098,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103712,7 +123163,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", // "parameterOrder": [ @@ -103765,7 +123216,8 @@ type TargetTcpProxiesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified TargetTcpProxy resource. +// Delete: Deletes the specified TargetTcpProxy resource. 
(== +// suppress_warning http-rest-shadowed ==) func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) *TargetTcpProxiesDeleteCall { c := &TargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103819,6 +123271,7 @@ func (c *TargetTcpProxiesDeleteCall) Header() http.Header { func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103878,7 +123331,7 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified TargetTcpProxy resource.", + // "description": "Deletes the specified TargetTcpProxy resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.targetTcpProxies.delete", // "parameterOrder": [ @@ -103931,7 +123384,8 @@ type TargetTcpProxiesGetCall struct { } // Get: Returns the specified TargetTcpProxy resource. Gets a list of -// available target TCP proxies by making a list() request. +// available target TCP proxies by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetTcpProxiesService) Get(project string, targetTcpProxy string) *TargetTcpProxiesGetCall { c := &TargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103976,6 +123430,7 @@ func (c *TargetTcpProxiesGetCall) Header() http.Header { func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104038,7 +123493,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr } return ret, nil // { - // "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + // "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetTcpProxies.get", // "parameterOrder": [ @@ -104086,7 +123541,8 @@ type TargetTcpProxiesInsertCall struct { } // Insert: Creates a TargetTcpProxy resource in the specified project -// using the data included in the request. +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetTcpProxy) *TargetTcpProxiesInsertCall { c := &TargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104140,6 +123596,7 @@ func (c *TargetTcpProxiesInsertCall) Header() http.Header { func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104203,7 +123660,7 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", + // "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetTcpProxies.insert", // "parameterOrder": [ @@ -104250,7 +123707,7 @@ type TargetTcpProxiesListCall struct { } // List: Retrieves the list of TargetTcpProxy resources available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall { c := &TargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104357,6 +123814,7 @@ func (c *TargetTcpProxiesListCall) Header() http.Header { func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104418,7 +123876,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP } return ret, nil // { - // "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", + // "description": "Retrieves the list of TargetTcpProxy resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetTcpProxies.list", // "parameterOrder": [ @@ -104502,7 +123960,8 @@ type TargetTcpProxiesSetBackendServiceCall struct { header_ http.Header } -// SetBackendService: Changes the BackendService for TargetTcpProxy. +// SetBackendService: Changes the BackendService for TargetTcpProxy. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104557,6 +124016,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104621,7 +124081,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the BackendService for TargetTcpProxy.", + // "description": "Changes the BackendService for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetTcpProxies.setBackendService", // "parameterOrder": [ @@ -104676,7 +124136,8 @@ type TargetTcpProxiesSetProxyHeaderCall struct { header_ http.Header } -// SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. +// SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. 
(== +// suppress_warning http-rest-shadowed ==) func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104731,6 +124192,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104795,7 +124257,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes the ProxyHeaderType for TargetTcpProxy.", + // "description": "Changes the ProxyHeaderType for TargetTcpProxy. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetTcpProxies.setProxyHeader", // "parameterOrder": [ @@ -104850,6 +124312,7 @@ type TargetVpnGatewaysAggregatedListCall struct { } // AggregatedList: Retrieves an aggregated list of target VPN gateways. +// (== suppress_warning http-rest-shadowed ==) func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104956,6 +124419,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105017,7 +124481,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of target VPN gateways.", + // "description": "Retrieves an aggregated list of target VPN gateways. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.aggregatedList", // "parameterOrder": [ @@ -105101,7 +124565,8 @@ type TargetVpnGatewaysDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified target VPN gateway. +// Delete: Deletes the specified target VPN gateway. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105156,6 +124621,7 @@ func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105216,7 +124682,7 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified target VPN gateway.", + // "description": "Deletes the specified target VPN gateway. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.targetVpnGateways.delete", // "parameterOrder": [ @@ -105278,7 +124744,8 @@ type TargetVpnGatewaysGetCall struct { } // Get: Returns the specified target VPN gateway. Gets a list of -// available target VPN gateways by making a list() request. +// available target VPN gateways by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall { c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105324,6 +124791,7 @@ func (c *TargetVpnGatewaysGetCall) Header() http.Header { func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105387,7 +124855,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG } return ret, nil // { - // "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", + // "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.get", // "parameterOrder": [ @@ -105444,7 +124912,8 @@ type TargetVpnGatewaysInsertCall struct { } // Insert: Creates a target VPN gateway in the specified project and -// region using the data included in the request. +// region using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105499,6 +124968,7 @@ func (c *TargetVpnGatewaysInsertCall) Header() http.Header { func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105563,7 +125033,7 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.targetVpnGateways.insert", // "parameterOrder": [ @@ -105619,7 +125089,8 @@ type TargetVpnGatewaysListCall struct { } // List: Retrieves a list of target VPN gateways available to the -// specified project and region. +// specified project and region. 
(== suppress_warning http-rest-shadowed +// ==) func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105727,6 +125198,7 @@ func (c *TargetVpnGatewaysListCall) Header() http.Header { func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105789,7 +125261,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn } return ret, nil // { - // "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + // "description": "Retrieves a list of target VPN gateways available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.list", // "parameterOrder": [ @@ -105869,6 +125341,260 @@ func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpn } } +// method id "compute.urlMaps.aggregatedList": + +type UrlMapsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all UrlMap resources, regional +// and global, available to the specified project. (== suppress_warning +// http-rest-shadowed ==) +func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCall { + c := &UrlMapsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *UrlMapsAggregatedListCall) Filter(filter string) *UrlMapsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *UrlMapsAggregatedListCall) MaxResults(maxResults int64) *UrlMapsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *UrlMapsAggregatedListCall) OrderBy(orderBy string) *UrlMapsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *UrlMapsAggregatedListCall) PageToken(pageToken string) *UrlMapsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsAggregatedListCall) Fields(s ...googleapi.Field) *UrlMapsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *UrlMapsAggregatedListCall) IfNoneMatch(entityTag string) *UrlMapsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsAggregatedListCall) Context(ctx context.Context) *UrlMapsAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.urlMaps.aggregatedList" call. 
+// Exactly one of *UrlMapsAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UrlMapsAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMapsAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.urlMaps.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/urlMaps", + // "response": { + // "$ref": "UrlMapsAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *UrlMapsAggregatedListCall) Pages(ctx context.Context, f func(*UrlMapsAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.urlMaps.delete": type UrlMapsDeleteCall struct { @@ -105880,7 +125606,8 @@ type UrlMapsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified UrlMap resource. +// Delete: Deletes the specified UrlMap resource. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -105935,6 +125662,7 @@ func (c *UrlMapsDeleteCall) Header() http.Header { func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105994,7 +125722,7 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified UrlMap resource.", + // "description": "Deletes the specified UrlMap resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.urlMaps.delete", // "parameterOrder": [ @@ -106047,7 +125775,8 @@ type UrlMapsGetCall struct { } // Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. +// URL maps by making a list() request. 
(== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -106093,6 +125822,7 @@ func (c *UrlMapsGetCall) Header() http.Header { func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106155,7 +125885,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { } return ret, nil // { - // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.urlMaps.get", // "parameterOrder": [ @@ -106203,7 +125933,8 @@ type UrlMapsInsertCall struct { } // Insert: Creates a UrlMap resource in the specified project using the -// data included in the request. +// data included in the request. (== suppress_warning http-rest-shadowed +// ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -106258,6 +125989,7 @@ func (c *UrlMapsInsertCall) Header() http.Header { func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106321,7 +126053,7 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + // "description": "Creates a UrlMap resource in the specified project using the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.urlMaps.insert", // "parameterOrder": [ @@ -106369,7 +126101,8 @@ type UrlMapsInvalidateCacheCall struct { } // InvalidateCache: Initiates a cache invalidation operation, -// invalidating the specified path, scoped to the specified UrlMap. +// invalidating the specified path, scoped to the specified UrlMap. 
(== +// suppress_warning http-rest-shadowed ==) func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -106424,6 +126157,7 @@ func (c *UrlMapsInvalidateCacheCall) Header() http.Header { func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106488,7 +126222,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.urlMaps.invalidateCache", // "parameterOrder": [ @@ -106543,7 +126277,7 @@ type UrlMapsListCall struct { } // List: Retrieves the list of UrlMap resources available to the -// specified project. +// specified project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list func (r *UrlMapsService) List(project string) *UrlMapsListCall { c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -106651,6 +126385,7 @@ func (c *UrlMapsListCall) Header() http.Header { func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106712,7 +126447,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) } return ret, nil // { - // "description": "Retrieves the list of UrlMap resources available to the specified project.", + // "description": "Retrieves the list of UrlMap resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.urlMaps.list", // "parameterOrder": [ @@ -106798,7 +126533,8 @@ type UrlMapsPatchCall struct { // Patch: Patches the specified UrlMap resource with the data included // in the request. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. +// JSON merge patch format and processing rules. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -106854,6 +126590,7 @@ func (c *UrlMapsPatchCall) Header() http.Header { func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106918,7 +126655,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified UrlMap resource with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PATCH", // "id": "compute.urlMaps.patch", // "parameterOrder": [ @@ -106974,7 +126711,7 @@ type UrlMapsUpdateCall struct { } // Update: Updates the specified UrlMap resource with the data included -// in the request. +// in the request. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -107030,6 +126767,7 @@ func (c *UrlMapsUpdateCall) Header() http.Header { func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107094,7 +126832,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the specified UrlMap resource with the data included in the request.", + // "description": "Updates the specified UrlMap resource with the data included in the request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "PUT", // "id": "compute.urlMaps.update", // "parameterOrder": [ @@ -107151,7 +126889,7 @@ type UrlMapsValidateCall struct { // Validate: Runs static validation for the UrlMap. In particular, the // tests of the provided UrlMap will be run. Calling this method does -// NOT create the UrlMap. +// NOT create the UrlMap. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -107188,6 +126926,7 @@ func (c *UrlMapsValidateCall) Header() http.Header { func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107252,7 +126991,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate } return ret, nil // { - // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.urlMaps.validate", // "parameterOrder": [ @@ -107290,6 +127029,1566 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate } +// method id "compute.vpnGateways.aggregatedList": + +type VpnGatewaysAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of VPN gateways. (== +// suppress_warning http-rest-shadowed ==) +func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregatedListCall { + c := &VpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *VpnGatewaysAggregatedListCall) Filter(filter string) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *VpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *VpnGatewaysAggregatedListCall) OrderBy(orderBy string) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *VpnGatewaysAggregatedListCall) PageToken(pageToken string) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *VpnGatewaysAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysAggregatedListCall) Context(ctx context.Context) *VpnGatewaysAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.aggregatedList" call. +// Exactly one of *VpnGatewayAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *VpnGatewayAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnGatewayAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of VPN gateways. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/vpnGateways", + // "response": { + // "$ref": "VpnGatewayAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *VpnGatewaysAggregatedListCall) Pages(ctx context.Context, f func(*VpnGatewayAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.vpnGateways.delete": + +type VpnGatewaysDeleteCall struct { + s *Service + project string + region string + vpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified VPN gateway. (== suppress_warning +// http-rest-shadowed ==) +func (r *VpnGatewaysService) Delete(project string, region string, vpnGateway string) *VpnGatewaysDeleteCall { + c := &VpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnGateway = vpnGateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *VpnGatewaysDeleteCall) RequestId(requestId string) *VpnGatewaysDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *VpnGatewaysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *VpnGatewaysDeleteCall) Context(ctx context.Context) *VpnGatewaysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnGateway": c.vpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified VPN gateway. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "DELETE", + // "id": "compute.vpnGateways.delete", + // "parameterOrder": [ + // "project", + // "region", + // "vpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "vpnGateway": { + // "description": "Name of the VPN gateway to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnGateways.get": + +type VpnGatewaysGetCall struct { + s *Service + project string + region string + vpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified VPN gateway. Gets a list of available VPN +// gateways by making a list() request. (== suppress_warning +// http-rest-shadowed ==) +func (r *VpnGatewaysService) Get(project string, region string, vpnGateway string) *VpnGatewaysGetCall { + c := &VpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnGateway = vpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysGetCall) Fields(s ...googleapi.Field) *VpnGatewaysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysGetCall) IfNoneMatch(entityTag string) *VpnGatewaysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysGetCall) Context(ctx context.Context) *VpnGatewaysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnGateway": c.vpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.get" call. +// Exactly one of *VpnGateway or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *VpnGateway.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnGateway{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.get", + // "parameterOrder": [ + // "project", + // "region", + // "vpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnGateway": { + // "description": "Name of the VPN gateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", + // "response": { + // "$ref": "VpnGateway" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.vpnGateways.getStatus": + +type VpnGatewaysGetStatusCall struct { + s *Service + project string + region string + vpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetStatus: Returns the status for the specified VPN gateway. 
(== +// suppress_warning http-rest-shadowed ==) +func (r *VpnGatewaysService) GetStatus(project string, region string, vpnGateway string) *VpnGatewaysGetStatusCall { + c := &VpnGatewaysGetStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnGateway = vpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysGetStatusCall) Fields(s ...googleapi.Field) *VpnGatewaysGetStatusCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysGetStatusCall) IfNoneMatch(entityTag string) *VpnGatewaysGetStatusCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysGetStatusCall) Context(ctx context.Context) *VpnGatewaysGetStatusCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysGetStatusCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnGateway": c.vpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.getStatus" call. +// Exactly one of *VpnGatewaysGetStatusResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *VpnGatewaysGetStatusResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnGatewaysGetStatusCall) Do(opts ...googleapi.CallOption) (*VpnGatewaysGetStatusResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnGatewaysGetStatusResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the status for the specified VPN gateway. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.getStatus", + // "parameterOrder": [ + // "project", + // "region", + // "vpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnGateway": { + // "description": "Name of the VPN gateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + // "response": { + // "$ref": "VpnGatewaysGetStatusResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.vpnGateways.insert": + +type VpnGatewaysInsertCall struct { + s *Service + project string + region string + vpngateway *VpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a VPN gateway in the specified project and region +// using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) +func (r *VpnGatewaysService) Insert(project string, region string, vpngateway *VpnGateway) *VpnGatewaysInsertCall { + c := &VpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpngateway = vpngateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *VpnGatewaysInsertCall) RequestId(requestId string) *VpnGatewaysInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysInsertCall) Fields(s ...googleapi.Field) *VpnGatewaysInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysInsertCall) Context(ctx context.Context) *VpnGatewaysInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.vpngateway) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a VPN gateway in the specified project and region using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.vpnGateways.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways", + // "request": { + // "$ref": "VpnGateway" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnGateways.list": + +type VpnGatewaysListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of VPN gateways available to the specified +// project and region. (== suppress_warning http-rest-shadowed ==) +func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysListCall { + c := &VpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *VpnGatewaysListCall) Filter(filter string) *VpnGatewaysListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *VpnGatewaysListCall) MaxResults(maxResults int64) *VpnGatewaysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *VpnGatewaysListCall) OrderBy(orderBy string) *VpnGatewaysListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *VpnGatewaysListCall) PageToken(pageToken string) *VpnGatewaysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysListCall) Fields(s ...googleapi.Field) *VpnGatewaysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysListCall) IfNoneMatch(entityTag string) *VpnGatewaysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysListCall) Context(ctx context.Context) *VpnGatewaysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *VpnGatewaysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.list" call. +// Exactly one of *VpnGatewayList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *VpnGatewayList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnGatewayList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of VPN gateways available to the specified project and region. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways", + // "response": { + // "$ref": "VpnGatewayList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *VpnGatewaysListCall) Pages(ctx context.Context, f func(*VpnGatewayList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.vpnGateways.setLabels": + +type VpnGatewaysSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a VpnGateway. To learn more about +// labels, read the Labeling Resources documentation. 
(== +// suppress_warning http-rest-shadowed ==) +func (r *VpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnGatewaysSetLabelsCall { + c := &VpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *VpnGatewaysSetLabelsCall) RequestId(requestId string) *VpnGatewaysSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *VpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysSetLabelsCall) Context(ctx context.Context) *VpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.vpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnGateways.testIamPermissions": + +type VpnGatewaysTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
(== suppress_warning http-rest-shadowed ==) +func (r *VpnGatewaysService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *VpnGatewaysTestIamPermissionsCall { + c := &VpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *VpnGatewaysTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *VpnGatewaysTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "POST", + // "id": "compute.vpnGateways.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.vpnTunnels.aggregatedList": type VpnTunnelsAggregatedListCall struct { @@ -107301,7 +128600,8 @@ type VpnTunnelsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of VPN tunnels. +// AggregatedList: Retrieves an aggregated list of VPN tunnels. (== +// suppress_warning http-rest-shadowed ==) func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregatedListCall { c := &VpnTunnelsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107408,6 +128708,7 @@ func (c *VpnTunnelsAggregatedListCall) Header() http.Header { func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107469,7 +128770,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun } return ret, nil // { - // "description": "Retrieves an aggregated list of VPN tunnels.", + // "description": "Retrieves an aggregated list of VPN tunnels. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.vpnTunnels.aggregatedList", // "parameterOrder": [ @@ -107553,7 +128854,8 @@ type VpnTunnelsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified VpnTunnel resource. 
+// Delete: Deletes the specified VpnTunnel resource. (== +// suppress_warning http-rest-shadowed ==) func (r *VpnTunnelsService) Delete(project string, region string, vpnTunnel string) *VpnTunnelsDeleteCall { c := &VpnTunnelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107608,6 +128910,7 @@ func (c *VpnTunnelsDeleteCall) Header() http.Header { func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107668,7 +128971,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Deletes the specified VpnTunnel resource.", + // "description": "Deletes the specified VpnTunnel resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.vpnTunnels.delete", // "parameterOrder": [ @@ -107730,7 +129033,8 @@ type VpnTunnelsGetCall struct { } // Get: Returns the specified VpnTunnel resource. Gets a list of -// available VPN tunnels by making a list() request. +// available VPN tunnels by making a list() request. (== +// suppress_warning http-rest-shadowed ==) func (r *VpnTunnelsService) Get(project string, region string, vpnTunnel string) *VpnTunnelsGetCall { c := &VpnTunnelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107776,6 +129080,7 @@ func (c *VpnTunnelsGetCall) Header() http.Header { func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107839,7 +129144,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) } return ret, nil // { - // "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request.", + // "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.vpnTunnels.get", // "parameterOrder": [ @@ -107896,7 +129201,8 @@ type VpnTunnelsInsertCall struct { } // Insert: Creates a VpnTunnel resource in the specified project and -// region using the data included in the request. +// region using the data included in the request. (== suppress_warning +// http-rest-shadowed ==) func (r *VpnTunnelsService) Insert(project string, region string, vpntunnel *VpnTunnel) *VpnTunnelsInsertCall { c := &VpnTunnelsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -107951,6 +129257,7 @@ func (c *VpnTunnelsInsertCall) Header() http.Header { func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108015,7 +129322,7 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + // "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "POST", // "id": "compute.vpnTunnels.insert", // "parameterOrder": [ @@ -108071,7 +129378,8 @@ type VpnTunnelsListCall struct { } // List: Retrieves a list of VpnTunnel resources contained in the -// specified project and region. +// specified project and region. (== suppress_warning http-rest-shadowed +// ==) func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListCall { c := &VpnTunnelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -108179,6 +129487,7 @@ func (c *VpnTunnelsListCall) Header() http.Header { func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108241,7 +129550,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e } return ret, nil // { - // "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + // "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.vpnTunnels.list", // "parameterOrder": [ @@ -108333,7 +129642,8 @@ type ZoneOperationsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified zone-specific Operations resource. +// Delete: Deletes the specified zone-specific Operations resource. (== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/delete func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall { c := &ZoneOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -108370,6 +129680,7 @@ func (c *ZoneOperationsDeleteCall) Header() http.Header { func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108405,7 +129716,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } return nil // { - // "description": "Deletes the specified zone-specific Operations resource.", + // "description": "Deletes the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "DELETE", // "id": "compute.zoneOperations.delete", // "parameterOrder": [ @@ -108458,7 +129769,8 @@ type ZoneOperationsGetCall struct { header_ http.Header } -// Get: Retrieves the specified zone-specific Operations resource. +// Get: Retrieves the specified zone-specific Operations resource. 
(== +// suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/get func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall { c := &ZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -108505,6 +129817,7 @@ func (c *ZoneOperationsGetCall) Header() http.Header { func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108568,7 +129881,7 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Retrieves the specified zone-specific Operations resource.", + // "description": "Retrieves the specified zone-specific Operations resource. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.zoneOperations.get", // "parameterOrder": [ @@ -108625,7 +129938,7 @@ type ZoneOperationsListCall struct { } // List: Retrieves a list of Operation resources contained within the -// specified zone. +// specified zone. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/list func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall { c := &ZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -108734,6 +130047,7 @@ func (c *ZoneOperationsListCall) Header() http.Header { func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108796,7 +130110,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified zone.", + // "description": "Retrieves a list of Operation resources contained within the specified zone. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.zoneOperations.list", // "parameterOrder": [ @@ -108889,7 +130203,8 @@ type ZonesGetCall struct { } // Get: Returns the specified Zone resource. Gets a list of available -// zones by making a list() request. +// zones by making a list() request. (== suppress_warning +// http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/zones/get func (r *ZonesService) Get(project string, zone string) *ZonesGetCall { c := &ZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -108935,6 +130250,7 @@ func (c *ZonesGetCall) Header() http.Header { func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108997,7 +130313,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { } return ret, nil // { - // "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", + // "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request. 
(== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.zones.get", // "parameterOrder": [ @@ -109045,7 +130361,7 @@ type ZonesListCall struct { } // List: Retrieves the list of Zone resources available to the specified -// project. +// project. (== suppress_warning http-rest-shadowed ==) // For details, see https://cloud.google.com/compute/docs/reference/latest/zones/list func (r *ZonesService) List(project string) *ZonesListCall { c := &ZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -109153,6 +130469,7 @@ func (c *ZonesListCall) Header() http.Header { func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109214,7 +130531,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { } return ret, nil // { - // "description": "Retrieves the list of Zone resources available to the specified project.", + // "description": "Retrieves the list of Zone resources available to the specified project. (== suppress_warning http-rest-shadowed ==)", // "httpMethod": "GET", // "id": "compute.zones.list", // "parameterOrder": [ diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go deleted file mode 100644 index 94b7789eea..0000000000 --- a/vendor/google.golang.org/api/gensupport/backoff.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "math/rand" - "time" -) - -// BackoffStrategy defines the set of functions that a backoff-er must -// implement. -type BackoffStrategy interface { - // Pause returns the duration of the next pause and true if the operation should be - // retried, or false if no further retries should be attempted. - Pause() (time.Duration, bool) - - // Reset restores the strategy to its initial state. - Reset() -} - -// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. -// The initial pause time is given by Base. -// Once the total pause time exceeds Max, Pause will indicate no further retries. -type ExponentialBackoff struct { - Base time.Duration - Max time.Duration - total time.Duration - n uint -} - -// Pause returns the amount of time the caller should wait. -func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { - if eb.total > eb.Max { - return 0, false - } - - // The next pause is selected from randomly from [0, 2^n * Base). - d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) - eb.total += d - eb.n++ - return d, true -} - -// Reset resets the backoff strategy such that the next Pause call will begin -// counting from the start. It is not safe to call concurrently with Pause. -func (eb *ExponentialBackoff) Reset() { - eb.n = 0 - eb.total = 0 -} diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go deleted file mode 100644 index cb5e67c77a..0000000000 --- a/vendor/google.golang.org/api/gensupport/header.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gensupport - -import ( - "fmt" - "runtime" - "strings" -) - -// GoogleClientHeader returns the value to use for the x-goog-api-client -// header, which is used internally by Google. -func GoogleClientHeader(generatorVersion, clientElement string) string { - elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)} - if clientElement != "" { - elts = append(elts, clientElement) - } - elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion)) - return strings.Join(elts, " ") -} diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go deleted file mode 100644 index fdde3f42c6..0000000000 --- a/vendor/google.golang.org/api/gensupport/retry.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gensupport - -import ( - "context" - "io" - "net" - "net/http" - "time" -) - -// Retry invokes the given function, retrying it multiple times if the connection failed or -// the HTTP status response indicates the request should be attempted again. ctx may be nil. -func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { - for { - resp, err := f() - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Return if we shouldn't retry. - pause, retry := backoff.Pause() - if !shouldRetry(status, err) || !retry { - return resp, err - } - - // Ensure the response body is closed, if any. - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - - // Pause, but still listen to ctx.Done if context is not nil. - var done <-chan struct{} - if ctx != nil { - done = ctx.Done() - } - select { - case <-done: - return nil, ctx.Err() - case <-time.After(pause): - } - } -} - -// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. -func DefaultBackoffStrategy() BackoffStrategy { - return &ExponentialBackoff{ - Base: 250 * time.Millisecond, - Max: 16 * time.Second, - } -} - -// shouldRetry returns true if the HTTP response / error indicates that the -// request should be attempted again. -func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(net.Error); ok { - return err.Temporary() - } - return false -} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index ab53767624..4431716d3b 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -1,4 +1,4 @@ -// Copyright 2011 Google Inc. All rights reserved. +// Copyright 2011 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
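The three gensupport files removed above (backoff.go, header.go, retry.go) implemented the old client-side retry loop: ExponentialBackoff draws each pause uniformly from [0, 2^n * Base) and stops retrying once the accumulated pause exceeds Max, while Retry re-invokes the request for 5xx responses, 429s, and transient network errors. The following is a minimal standalone sketch of that behaviour for reference only; the lower-case identifiers are local to the sketch and are not part of the vendored library, whose regenerated clients now import google.golang.org/api/internal/gensupport instead.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// exponentialBackoff mirrors the deleted ExponentialBackoff type: each pause
// is drawn uniformly from [0, 2^n * base) and retrying stops once the total
// pause time exceeds max.
type exponentialBackoff struct {
	base, max, total time.Duration
	n                uint
}

func (eb *exponentialBackoff) pause() (time.Duration, bool) {
	if eb.total > eb.max {
		return 0, false
	}
	d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.base)))
	eb.total += d
	eb.n++
	return d, true
}

// doRequest stands in for the wrapped HTTP call; it always fails here so the
// backoff path is exercised.
func doRequest() error { return errors.New("HTTP 503") }

func main() {
	// Same defaults as the deleted DefaultBackoffStrategy: 250ms base, 16s cap.
	eb := &exponentialBackoff{base: 250 * time.Millisecond, max: 16 * time.Second}
	for attempt := 1; ; attempt++ {
		if err := doRequest(); err == nil {
			fmt.Println("succeeded on attempt", attempt)
			return
		}
		d, retry := eb.pause()
		if !retry {
			fmt.Println("giving up after", attempt, "attempts")
			return
		}
		time.Sleep(d) // the real Retry also selected on ctx.Done()
	}
}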
@@ -16,7 +16,7 @@ import ( "net/url" "strings" - "google.golang.org/api/googleapi/internal/uritemplates" + "google.golang.org/api/internal/third_party/uritemplates" ) // ContentTyper is an interface for Readers which know (or would like @@ -256,14 +256,22 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions { // "http://www.golang.org/topics/myproject/mytopic". It strips all parent // references (e.g. ../..) as well as anything after the host // (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). +// +// ResolveRelative panics if either basestr or relstr is not able to be parsed. func ResolveRelative(basestr, relstr string) string { - u, _ := url.Parse(basestr) + u, err := url.Parse(basestr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", basestr)) + } afterColonPath := "" if i := strings.IndexRune(relstr, ':'); i > 0 { afterColonPath = relstr[i+1:] relstr = relstr[:i] } - rel, _ := url.Parse(relstr) + rel, err := url.Parse(relstr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", relstr)) + } u = u.ResolveReference(rel) us := u.String() if afterColonPath != "" { @@ -331,7 +339,7 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { } // A Field names a field to be retrieved with a partial response. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance // // Partial responses can dramatically reduce the amount of data that must be sent to your application. // In order to request partial responses, you can specify the full list of fields @@ -348,9 +356,6 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // // svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // -// More information about field formatting can be found here: -// https://developers.google.com/+/api/#fields-syntax -// // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ type Field string diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE deleted file mode 100644 index de9c88cb65..0000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
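Aside from the import move to internal/third_party/uritemplates, the behavioural change to googleapi.ResolveRelative above is limited to error handling: base or relative strings that url.Parse rejects now panic with an explicit message rather than failing later in an undefined way. A small usage sketch follows, assuming the vendored package is importable; the base path and relative path values are illustrative only.

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// Join a service base path with a relative path, the same call the
	// generated clients make before expanding their {+name}-style templates.
	u := googleapi.ResolveRelative("https://iam.googleapis.com/", "v1/roles")
	fmt.Println(u) // https://iam.googleapis.com/v1/roles
}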
diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index eca1ea2507..4b6c0d527e 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -1,4 +1,4 @@ -// Copyright 2012 Google Inc. All rights reserved. +// Copyright 2012 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index a280e3021a..fabf74d50d 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -1,4 +1,4 @@ -// Copyright 2013 Google Inc. All rights reserved. +// Copyright 2013 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/google.golang.org/api/iam/v1/iam-api.json b/vendor/google.golang.org/api/iam/v1/iam-api.json index 8664e06927..8c0fc8e046 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -158,7 +158,7 @@ ], "parameters": { "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -192,7 +192,7 @@ "type": "string" }, "name": { - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. 
This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -217,7 +217,7 @@ ], "parameters": { "name": { - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -253,7 +253,7 @@ "type": "string" }, "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. 
This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, @@ -292,7 +292,7 @@ ], "parameters": { "name": { - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -326,7 +326,7 @@ ], "parameters": { "name": { - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. 
Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^organizations/[^/]+/roles/[^/]+$", "required": true, @@ -384,7 +384,7 @@ ], "parameters": { "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -418,7 +418,7 @@ "type": "string" }, "name": { - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -443,7 +443,7 @@ ], "parameters": { "name": { - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). 
Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -479,7 +479,7 @@ "type": "string" }, "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -518,7 +518,7 @@ ], "parameters": { "name": { - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). 
Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -552,7 +552,7 @@ ], "parameters": { "name": { - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^projects/[^/]+/roles/[^/]+$", "required": true, @@ -665,7 +665,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -717,6 +717,12 @@ "resource" ], "parameters": { + "options.requestedPolicyVersion": { + "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. 
Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + "format": "int32", + "location": "query", + "type": "integer" + }, "resource": { "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", "location": "path", @@ -919,7 +925,7 @@ ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", "location": "path", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "required": true, @@ -1092,6 +1098,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] + }, + "upload": { + "description": "Upload public key for a given service account.\nThis rpc will create a\nServiceAccountKey that has the\nprovided public key and returns it.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys:upload", + "httpMethod": "POST", + "id": "iam.projects.serviceAccounts.keys.upload", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}/keys:upload", + "request": { + "$ref": "UploadServiceAccountKeyRequest" + }, + "response": { + "$ref": "ServiceAccountKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] } } } @@ -1111,7 +1145,7 @@ ], "parameters": { "name": { - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "path", "pattern": "^roles/[^/]+$", "required": true, @@ -1145,7 +1179,7 @@ "type": "string" }, "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. 
This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", "location": "query", "type": "string" }, @@ -1193,7 +1227,7 @@ } } }, - "revision": "20190429", + "revision": "20191108", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AdminAuditData": { @@ -1208,7 +1242,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"fooservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:bar@gmail.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts foo@gmail.com from DATA_READ logging, and\nbar@gmail.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. 
It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1237,7 +1271,7 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { @@ -1286,7 +1320,7 @@ "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a user that has been recently deleted. For\n example,`alice@example.com?uid=123456789012345678901`. If the user is\n recovered, this value reverts to `user:{emailid}` and the recovered user\n retains the role in the binding.\n\n* `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus\n unique identifier) representing a service account that has been recently\n deleted. 
For example,\n `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.\n If the service account is undeleted, this value reverts to\n `serviceAccount:{emailid}` and the undeleted service account retains the\n role in the binding.\n\n* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique\n identifier) representing a Google group that has been recently\n deleted. For example, `admins@example.com?uid=123456789012345678901`. If\n the group is recovered, this value reverts to `group:{emailid}` and the\n recovered group retains the role in the binding.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", "items": { "type": "string" }, @@ -1319,7 +1353,7 @@ }, "condition": { "$ref": "Expr", - "description": "Unimplemented. The condition that is associated with this binding.\nThis field is logged only for Cloud Audit Logging." + "description": "The condition that is associated with this binding." }, "member": { "description": "A single identity requesting access for a Cloud Platform resource.\nFollows the same format of Binding.members.\nRequired", @@ -1341,7 +1375,7 @@ "description": "The Role resource to create." }, "roleId": { - "description": "The role id to use for this role.", + "description": "The role ID to use for this role.", "type": "string" } }, @@ -1542,7 +1576,7 @@ "type": "string" }, "validationUnitName": { - "description": "The validation unit name, for instance\n“lintValidationUnits/ConditionComplexityCheck”.", + "description": "The validation unit name, for instance\n\"lintValidationUnits/ConditionComplexityCheck\".", "type": "string" } }, @@ -1635,7 +1669,7 @@ "type": "string" }, "description": { - "description": "A brief description of what this Permission is used for.", + "description": "A brief description of what this Permission is used for.\nThis permission can ONLY be used in predefined roles.", "type": "string" }, "name": { @@ -1643,9 +1677,12 @@ "type": "string" }, "onlyInPredefinedRoles": { - "description": "This permission can ONLY be used in predefined roles.", "type": "boolean" }, + "primaryPermission": { + "description": "The preferred name for this permission. If present, then this permission is\nan alias of, and equivalent to, the listed primary_permission.", + "type": "string" + }, "stage": { "description": "The current launch stage of the permission.", "enum": [ @@ -1691,7 +1728,7 @@ "type": "object" }, "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\n**YAML Example**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-other-app@appspot.gserviceaccount.com\n role: roles/owner\n - members:\n - user:sean@example.com\n role: roles/viewer\n\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam/docs).", + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions (defined by IAM or configured by users). A `binding` can\noptionally specify a `condition`, which is a logic expression that further\nconstrains the role binding based on attributes about the request and/or\ntarget resource.\n\n**JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c\n timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ]\n }\n\n**YAML Example**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam/docs).", "id": "Policy", "properties": { "auditConfigs": { @@ -1702,19 +1739,19 @@ "type": "array" }, "bindings": { - "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", + "description": "Associates a list of `members` to a `role`. 
Optionally may specify a\n`condition` that determines when binding is in effect.\n`bindings` with no members will result in an error.", "items": { "$ref": "Binding" }, "type": "array" }, "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten. Due to blind-set semantics of an etag-less policy,\n'setIamPolicy' will not fail even if either of incoming or stored policy\ndoes not meet the version requirements.", "format": "byte", "type": "string" }, "version": { - "description": "Deprecated.", + "description": "Specifies the format of the policy.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nOperations affecting conditional bindings must specify version 3. This can\nbe either setting a conditional policy, modifying a conditional binding,\nor removing a conditional binding from the stored conditional policy.\nOperations on non-conditional policies may specify any valid value or\nleave the field unset.\n\nIf no etag is provided in the call to `setIamPolicy`, any version\ncompliance checks on the incoming and/or stored policy is skipped.", "format": "int32", "type": "integer" } @@ -1856,7 +1893,7 @@ "type": "boolean" }, "description": { - "description": "Optional. A human-readable description for the role.", + "description": "Optional. A human-readable description for the role.", "type": "string" }, "etag": { @@ -1872,7 +1909,7 @@ "type": "array" }, "name": { - "description": "The name of the role.\n\nWhen Role is used in CreateRole, the role name must not be set.\n\nWhen Role is used in output and other input such as UpdateRole, the role\nname is the complete path, e.g., roles/logging.viewer for curated roles\nand organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.", + "description": "The name of the role.\n\nWhen Role is used in CreateRole, the role name must not be set.\n\nWhen Role is used in output and other input such as UpdateRole, the role\nname is the complete path, e.g., roles/logging.viewer for predefined roles\nand organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.", "type": "string" }, "stage": { @@ -1896,7 +1933,7 @@ "type": "string" }, "title": { - "description": "Optional. A human-readable title for the role. Typically this\nis limited to 100 UTF-8 bytes.", + "description": "Optional. 
A human-readable title for the role. Typically this\nis limited to 100 UTF-8 bytes.", "type": "string" } }, @@ -1964,6 +2001,34 @@ ], "type": "string" }, + "keyOrigin": { + "description": "The key origin.", + "enum": [ + "ORIGIN_UNSPECIFIED", + "USER_PROVIDED", + "GOOGLE_PROVIDED" + ], + "enumDescriptions": [ + "Unspecified key origin.", + "Key is provided by user.", + "Key is provided by Google." + ], + "type": "string" + }, + "keyType": { + "description": "The key type.", + "enum": [ + "KEY_TYPE_UNSPECIFIED", + "USER_MANAGED", + "SYSTEM_MANAGED" + ], + "enumDescriptions": [ + "Unspecified key type. The presence of this in the\nmessage will immediately result in an error.", + "User-managed keys (managed and rotated by the user).", + "System-managed keys (managed and rotated by Google)." + ], + "type": "string" + }, "name": { "description": "The resource name of the service account key in the following format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.", "type": "string" @@ -1998,7 +2063,7 @@ "type": "string" }, "validBeforeTime": { - "description": "The key can be used before this timestamp.", + "description": "The key can be used before this timestamp.\nFor system-managed key pairs, this timestamp is the end time for the\nprivate key signing operation. The public key could still be used\nfor verification for a few hours after this time.", "format": "google-datetime", "type": "string" } @@ -2130,6 +2195,18 @@ } }, "type": "object" + }, + "UploadServiceAccountKeyRequest": { + "description": "The service account key upload request.", + "id": "UploadServiceAccountKeyRequest", + "properties": { + "publicKeyData": { + "description": "A field that allows clients to upload their own public key. If set,\nuse this public key data to create a service account key for given\nservice account.\nPlease note, the expected format for this field is X509_PEM.", + "format": "byte", + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/iam/v1/iam-gen.go b/vendor/google.golang.org/api/iam/v1/iam-gen.go index 6ec73553ad..469c1dfb4a 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -49,8 +49,8 @@ import ( "strconv" "strings" - gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" ) @@ -293,7 +293,7 @@ func (s *AdminAuditData) MarshalJSON() ([]byte, error) { // { // "log_type": "DATA_READ", // "exempted_members": [ -// "user:foo@gmail.com" +// "user:jose@example.com" // ] // }, // { @@ -305,7 +305,7 @@ func (s *AdminAuditData) MarshalJSON() ([]byte, error) { // ] // }, // { -// "service": "fooservice.googleapis.com" +// "service": "sampleservice.googleapis.com" // "audit_log_configs": [ // { // "log_type": "DATA_READ", @@ -313,7 +313,7 @@ func (s *AdminAuditData) MarshalJSON() ([]byte, error) { // { // "log_type": "DATA_WRITE", // "exempted_members": [ -// "user:bar@gmail.com" +// "user:aliya@example.com" // ] // } // ] @@ -321,11 +321,11 @@ func (s *AdminAuditData) MarshalJSON() ([]byte, error) { // ] // } // -// For fooservice, this policy enables DATA_READ, DATA_WRITE and +// For sampleservice, this policy enables DATA_READ, DATA_WRITE and // ADMIN_READ -// logging. It also exempts foo@gmail.com from DATA_READ logging, +// logging. 
It also exempts jose@example.com from DATA_READ logging, // and -// bar@gmail.com from DATA_WRITE logging. +// aliya@example.com from DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. @@ -403,7 +403,7 @@ func (s *AuditData) MarshalJSON() ([]byte, error) { // { // "log_type": "DATA_READ", // "exempted_members": [ -// "user:foo@gmail.com" +// "user:jose@example.com" // ] // }, // { @@ -414,7 +414,7 @@ func (s *AuditData) MarshalJSON() ([]byte, error) { // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while // exempting -// foo@gmail.com from DATA_READ logging. +// jose@example.com from DATA_READ logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging // for this type of @@ -508,7 +508,7 @@ type Binding struct { // // * `user:{emailid}`: An email address that represents a specific // Google - // account. For example, `alice@gmail.com` . + // account. For example, `alice@example.com` . // // // * `serviceAccount:{emailid}`: An email address that represents a @@ -520,6 +520,38 @@ type Binding struct { // group. // For example, `admins@example.com`. // + // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique + // identifier) representing a user that has been recently deleted. + // For + // example,`alice@example.com?uid=123456789012345678901`. If the user + // is + // recovered, this value reverts to `user:{emailid}` and the + // recovered user + // retains the role in the binding. + // + // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address + // (plus + // unique identifier) representing a service account that has been + // recently + // deleted. For example, + // + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account + // retains the + // role in the binding. + // + // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus + // unique + // identifier) representing a Google group that has been recently + // deleted. For example, + // `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` + // and the + // recovered group retains the role in the binding. + // // // * `domain:{domain}`: The G Suite domain (primary) that represents all // the @@ -569,9 +601,7 @@ type BindingDelta struct { // "REMOVE" - Removal of a Binding. Action string `json:"action,omitempty"` - // Condition: Unimplemented. The condition that is associated with this - // binding. - // This field is logged only for Cloud Audit Logging. + // Condition: The condition that is associated with this binding. Condition *Expr `json:"condition,omitempty"` // Member: A single identity requesting access for a Cloud Platform @@ -614,7 +644,7 @@ type CreateRoleRequest struct { // Role: The Role resource to create. Role *Role `json:"role,omitempty"` - // RoleId: The role id to use for this role. + // RoleId: The role ID to use for this role. RoleId string `json:"roleId,omitempty"` // ForceSendFields is a list of field names (e.g. "Role") to @@ -1033,7 +1063,7 @@ type LintResult struct { // ValidationUnitName: The validation unit name, for // instance - // “lintValidationUnits/ConditionComplexityCheck”. + // "lintValidationUnits/ConditionComplexityCheck". 
ValidationUnitName string `json:"validationUnitName,omitempty"` // ForceSendFields is a list of field names (e.g. "BindingOrdinal") to @@ -1214,16 +1244,21 @@ type Permission struct { // "NOT_SUPPORTED" - Permission is not supported for custom role use. CustomRolesSupportLevel string `json:"customRolesSupportLevel,omitempty"` - // Description: A brief description of what this Permission is used for. + // Description: A brief description of what this Permission is used + // for. + // This permission can ONLY be used in predefined roles. Description string `json:"description,omitempty"` // Name: The name of this Permission. Name string `json:"name,omitempty"` - // OnlyInPredefinedRoles: This permission can ONLY be used in predefined - // roles. OnlyInPredefinedRoles bool `json:"onlyInPredefinedRoles,omitempty"` + // PrimaryPermission: The preferred name for this permission. If + // present, then this permission is + // an alias of, and equivalent to, the listed primary_permission. + PrimaryPermission string `json:"primaryPermission,omitempty"` + // Stage: The current launch stage of the permission. // // Possible values: @@ -1298,31 +1333,43 @@ func (s *PermissionDelta) MarshalJSON() ([]byte, error) { // specify access control policies for Cloud Platform resources. // // -// A `Policy` consists of a list of `bindings`. A `binding` binds a list -// of -// `members` to a `role`, where the members can be user accounts, Google -// groups, -// Google domains, and service accounts. A `role` is a named list of -// permissions -// defined by IAM. +// A `Policy` is a collection of `bindings`. A `binding` binds one or +// more +// `members` to a single `role`. Members can be user accounts, service +// accounts, +// Google groups, and domains (such as G Suite). A `role` is a named +// list of +// permissions (defined by IAM or configured by users). A `binding` +// can +// optionally specify a `condition`, which is a logic expression that +// further +// constrains the role binding based on attributes about the request +// and/or +// target resource. 
// // **JSON Example** // // { // "bindings": [ // { -// "role": "roles/owner", +// "role": "roles/resourcemanager.organizationAdmin", // "members": [ // "user:mike@example.com", // "group:admins@example.com", // "domain:google.com", // -// "serviceAccount:my-other-app@appspot.gserviceaccount.com" +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" // ] // }, // { -// "role": "roles/viewer", -// "members": ["user:sean@example.com"] +// "role": "roles/resourcemanager.organizationViewer", +// "members": ["user:eve@example.com"], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", +// } // } // ] // } @@ -1334,12 +1381,16 @@ func (s *PermissionDelta) MarshalJSON() ([]byte, error) { // - user:mike@example.com // - group:admins@example.com // - domain:google.com -// - serviceAccount:my-other-app@appspot.gserviceaccount.com -// role: roles/owner +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin // - members: -// - user:sean@example.com -// role: roles/viewer -// +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < +// timestamp('2020-10-01T00:00:00.000Z') // // For a description of IAM and its features, see the // [IAM developer's guide](https://cloud.google.com/iam/docs). @@ -1348,7 +1399,9 @@ type Policy struct { // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - // Bindings: Associates a list of `members` to a `role`. + // Bindings: Associates a list of `members` to a `role`. Optionally may + // specify a + // `condition` that determines when binding is in effect. // `bindings` with no members will result in an error. Bindings []*Binding `json:"bindings,omitempty"` @@ -1369,10 +1422,32 @@ type Policy struct { // // If no `etag` is provided in the call to `setIamPolicy`, then the // existing - // policy is overwritten blindly. + // policy is overwritten. Due to blind-set semantics of an etag-less + // policy, + // 'setIamPolicy' will not fail even if either of incoming or stored + // policy + // does not meet the version requirements. Etag string `json:"etag,omitempty"` - // Version: Deprecated. + // Version: Specifies the format of the policy. + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value + // will be + // rejected. + // + // Operations affecting conditional bindings must specify version 3. + // This can + // be either setting a conditional policy, modifying a conditional + // binding, + // or removing a conditional binding from the stored conditional + // policy. + // Operations on non-conditional policies may specify any valid value + // or + // leave the field unset. + // + // If no etag is provided in the call to `setIamPolicy`, any + // version + // compliance checks on the incoming and/or stored policy is skipped. Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1678,7 +1753,7 @@ type Role struct { // It will be ignored in calls to CreateRole and UpdateRole. Deleted bool `json:"deleted,omitempty"` - // Description: Optional. A human-readable description for the role. + // Description: Optional. A human-readable description for the role. 
Description string `json:"description,omitempty"` // Etag: Used to perform a consistent read-modify-write. @@ -1694,7 +1769,7 @@ type Role struct { // // When Role is used in output and other input such as UpdateRole, the // role - // name is the complete path, e.g., roles/logging.viewer for curated + // name is the complete path, e.g., roles/logging.viewer for predefined // roles // and organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom // roles. @@ -1724,7 +1799,7 @@ type Role struct { // phase. Stage string `json:"stage,omitempty"` - // Title: Optional. A human-readable title for the role. Typically + // Title: Optional. A human-readable title for the role. Typically // this // is limited to 100 UTF-8 bytes. Title string `json:"title,omitempty"` @@ -1900,6 +1975,26 @@ type ServiceAccountKey struct { // "KEY_ALG_RSA_2048" - 2k RSA Key. KeyAlgorithm string `json:"keyAlgorithm,omitempty"` + // KeyOrigin: The key origin. + // + // Possible values: + // "ORIGIN_UNSPECIFIED" - Unspecified key origin. + // "USER_PROVIDED" - Key is provided by user. + // "GOOGLE_PROVIDED" - Key is provided by Google. + KeyOrigin string `json:"keyOrigin,omitempty"` + + // KeyType: The key type. + // + // Possible values: + // "KEY_TYPE_UNSPECIFIED" - Unspecified key type. The presence of this + // in the + // message will immediately result in an error. + // "USER_MANAGED" - User-managed keys (managed and rotated by the + // user). + // "SYSTEM_MANAGED" - System-managed keys (managed and rotated by + // Google). + KeyType string `json:"keyType,omitempty"` + // Name: The resource name of the service account key in the following // format // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. @@ -1945,6 +2040,10 @@ type ServiceAccountKey struct { ValidAfterTime string `json:"validAfterTime,omitempty"` // ValidBeforeTime: The key can be used before this timestamp. + // For system-managed key pairs, this timestamp is the end time for + // the + // private key signing operation. The public key could still be used + // for verification for a few hours after this time. ValidBeforeTime string `json:"validBeforeTime,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2276,6 +2375,40 @@ func (s *UndeleteServiceAccountResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UploadServiceAccountKeyRequest: The service account key upload +// request. +type UploadServiceAccountKeyRequest struct { + // PublicKeyData: A field that allows clients to upload their own public + // key. If set, + // use this public key data to create a service account key for + // given + // service account. + // Please note, the expected format for this field is X509_PEM. + PublicKeyData string `json:"publicKeyData,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PublicKeyData") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PublicKeyData") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UploadServiceAccountKeyRequest) MarshalJSON() ([]byte, error) { + type NoMethod UploadServiceAccountKeyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // method id "iam.iamPolicies.lintPolicy": type IamPoliciesLintPolicyCall struct { @@ -2350,6 +2483,7 @@ func (c *IamPoliciesLintPolicyCall) Header() http.Header { func (c *IamPoliciesLintPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2476,6 +2610,7 @@ func (c *IamPoliciesQueryAuditableServicesCall) Header() http.Header { func (c *IamPoliciesQueryAuditableServicesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2602,6 +2737,7 @@ func (c *OrganizationsRolesCreateCall) Header() http.Header { func (c *OrganizationsRolesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2674,7 +2810,7 @@ func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, // ], // "parameters": { // "parent": { - // "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). 
Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -2757,6 +2893,7 @@ func (c *OrganizationsRolesDeleteCall) Header() http.Header { func (c *OrganizationsRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2830,7 +2967,7 @@ func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, // "type": "string" // }, // "name": { - // "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -2903,6 +3040,7 @@ func (c *OrganizationsRolesGetCall) Header() http.Header { func (c *OrganizationsRolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2973,7 +3111,7 @@ func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, err // ], // "parameters": { // "name": { - // "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. 
This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -3083,6 +3221,7 @@ func (c *OrganizationsRolesListCall) Header() http.Header { func (c *OrganizationsRolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3164,7 +3303,7 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole // "type": "string" // }, // "parent": { - // "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. 
This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, @@ -3270,6 +3409,7 @@ func (c *OrganizationsRolesPatchCall) Header() http.Header { func (c *OrganizationsRolesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3342,7 +3482,7 @@ func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, e // ], // "parameters": { // "name": { - // "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -3415,6 +3555,7 @@ func (c *OrganizationsRolesUndeleteCall) Header() http.Header { func (c *OrganizationsRolesUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3487,7 +3628,7 @@ func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role // ], // "parameters": { // "name": { - // "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^organizations/[^/]+/roles/[^/]+$", // "required": true, @@ -3555,6 +3696,7 @@ func (c *PermissionsQueryTestablePermissionsCall) Header() http.Header { func (c *PermissionsQueryTestablePermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3702,6 +3844,7 @@ func (c *ProjectsRolesCreateCall) Header() http.Header { func (c *ProjectsRolesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3774,7 +3917,7 @@ func (c *ProjectsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, error // ], // "parameters": { // "parent": { - // "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`projects.roles.create()`](/iam/reference/rest/v1/projects.roles/create):\n `projects/{PROJECT_ID}`. This method creates project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.create()`](/iam/reference/rest/v1/organizations.roles/create):\n `organizations/{ORGANIZATION_ID}`. This method creates organization-level\n [custom roles](/iam/docs/understanding-custom-roles). 
Example request\n URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -3857,6 +4000,7 @@ func (c *ProjectsRolesDeleteCall) Header() http.Header { func (c *ProjectsRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3930,7 +4074,7 @@ func (c *ProjectsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, error // "type": "string" // }, // "name": { - // "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.delete()`](/iam/reference/rest/v1/projects.roles/delete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.delete()`](/iam/reference/rest/v1/organizations.roles/delete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n deletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4003,6 +4147,7 @@ func (c *ProjectsRolesGetCall) Header() http.Header { func (c *ProjectsRolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4073,7 +4218,7 @@ func (c *ProjectsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { // ], // "parameters": { // "name": { - // "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. 
This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4183,6 +4328,7 @@ func (c *ProjectsRolesListCall) Header() http.Header { func (c *ProjectsRolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4264,7 +4410,7 @@ func (c *ProjectsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResp // "type": "string" // }, // "parent": { - // "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. 
This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -4370,6 +4516,7 @@ func (c *ProjectsRolesPatchCall) Header() http.Header { func (c *ProjectsRolesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4442,7 +4589,7 @@ func (c *ProjectsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, error) // ], // "parameters": { // "name": { - // "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.patch()`](/iam/reference/rest/v1/projects.roles/patch):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.patch()`](/iam/reference/rest/v1/organizations.roles/patch):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n updates only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4515,6 +4662,7 @@ func (c *ProjectsRolesUndeleteCall) Header() http.Header { func (c *ProjectsRolesUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4587,7 +4735,7 @@ func (c *ProjectsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role, err // ], // "parameters": { // "name": { - // "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`projects`](/iam/reference/rest/v1/projects.roles) or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`projects.roles.undelete()`](/iam/reference/rest/v1/projects.roles/undelete):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes\n only [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.undelete()`](/iam/reference/rest/v1/organizations.roles/undelete):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n undeletes only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^projects/[^/]+/roles/[^/]+$", // "required": true, @@ -4655,6 +4803,7 @@ func (c *ProjectsServiceAccountsCreateCall) Header() http.Header { func (c *ProjectsServiceAccountsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4792,6 +4941,7 @@ func (c *ProjectsServiceAccountsDeleteCall) Header() http.Header { func (c *ProjectsServiceAccountsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4949,6 +5099,7 @@ func (c *ProjectsServiceAccountsDisableCall) Header() http.Header { func (c *ProjectsServiceAccountsDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5102,6 +5253,7 @@ func (c *ProjectsServiceAccountsEnableCall) Header() http.Header { func (c *ProjectsServiceAccountsEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5174,7 +5326,7 @@ func (c *ProjectsServiceAccountsEnableCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5250,6 +5402,7 @@ func (c *ProjectsServiceAccountsGetCall) Header() http.Header { func (c *ProjectsServiceAccountsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5379,6 +5532,24 @@ func (r *ProjectsServiceAccountsService) GetIamPolicy(resource string) *Projects return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "options.requestedPolicyVersion": The policy format version to be +// returned. +// +// Valid values are 0, 1, and 3. Requests specifying an invalid value +// will be +// rejected. 
+// +// Requests for policies with any conditional bindings must specify +// version 3. +// Policies without any conditional bindings may specify any valid value +// or +// leave the field unset. +func (c *ProjectsServiceAccountsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsServiceAccountsGetIamPolicyCall { + c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -5406,6 +5577,7 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Header() http.Header { func (c *ProjectsServiceAccountsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5472,6 +5644,12 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio // "resource" // ], // "parameters": { + // "options.requestedPolicyVersion": { + // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "resource": { // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", @@ -5566,6 +5744,7 @@ func (c *ProjectsServiceAccountsListCall) Header() http.Header { func (c *ProjectsServiceAccountsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5741,6 +5920,7 @@ func (c *ProjectsServiceAccountsPatchCall) Header() http.Header { func (c *ProjectsServiceAccountsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5906,6 +6086,7 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) Header() http.Header { func (c *ProjectsServiceAccountsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6053,6 +6234,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6206,6 +6388,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6347,6 +6530,7 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) Header() http.Header { func (c 
*ProjectsServiceAccountsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6489,6 +6673,7 @@ func (c *ProjectsServiceAccountsUndeleteCall) Header() http.Header { func (c *ProjectsServiceAccountsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6561,7 +6746,7 @@ func (c *ProjectsServiceAccountsUndeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -6635,6 +6820,7 @@ func (c *ProjectsServiceAccountsUpdateCall) Header() http.Header { func (c *ProjectsServiceAccountsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6775,6 +6961,7 @@ func (c *ProjectsServiceAccountsKeysCreateCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6912,6 +7099,7 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7066,6 +7254,7 @@ func (c *ProjectsServiceAccountsKeysGetCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7233,6 +7422,7 @@ func (c *ProjectsServiceAccountsKeysListCall) Header() http.Header { func (c *ProjectsServiceAccountsKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7332,6 +7522,149 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( } +// method id "iam.projects.serviceAccounts.keys.upload": + +type ProjectsServiceAccountsKeysUploadCall struct { + s *Service + name string + uploadserviceaccountkeyrequest *UploadServiceAccountKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Upload: Upload public key for a given service account. +// This rpc will create a +// ServiceAccountKey that has the +// provided public key and returns it. 
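A rough usage sketch for the new `keys:upload` method introduced in this hunk (illustrative only and not part of the vendored change; the service construction, the placeholder resource name, and the base64 encoding of the X.509 PEM data are assumptions):

package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"log"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := iam.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// X.509 PEM public key to attach to the service account. The PublicKeyData
	// field is a string, so the PEM bytes are assumed to be base64-encoded here.
	pemData := []byte("-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n")
	req := &iam.UploadServiceAccountKeyRequest{
		PublicKeyData: base64.StdEncoding.EncodeToString(pemData),
	}

	// Per the parameter description above, `-` as the project ID infers the
	// project from the account; the account email below is a placeholder.
	name := "projects/-/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com"
	key, err := svc.Projects.ServiceAccounts.Keys.Upload(name, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("uploaded key:", key.Name)
}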
+func (r *ProjectsServiceAccountsKeysService) Upload(name string, uploadserviceaccountkeyrequest *UploadServiceAccountKeyRequest) *ProjectsServiceAccountsKeysUploadCall { + c := &ProjectsServiceAccountsKeysUploadCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.uploadserviceaccountkeyrequest = uploadserviceaccountkeyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsKeysUploadCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsKeysUploadCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsKeysUploadCall) Context(ctx context.Context) *ProjectsServiceAccountsKeysUploadCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsKeysUploadCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsKeysUploadCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.uploadserviceaccountkeyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/keys:upload") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.serviceAccounts.keys.upload" call. +// Exactly one of *ServiceAccountKey or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ServiceAccountKey.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsKeysUploadCall) Do(opts ...googleapi.CallOption) (*ServiceAccountKey, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServiceAccountKey{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Upload public key for a given service account.\nThis rpc will create a\nServiceAccountKey that has the\nprovided public key and returns it.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys:upload", + // "httpMethod": "POST", + // "id": "iam.projects.serviceAccounts.keys.upload", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}/keys:upload", + // "request": { + // "$ref": "UploadServiceAccountKeyRequest" + // }, + // "response": { + // "$ref": "ServiceAccountKey" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "iam.roles.get": type RolesGetCall struct { @@ -7387,6 +7720,7 @@ func (c *RolesGetCall) Header() http.Header { func (c *RolesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7457,7 +7791,7 @@ func (c *RolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { // ], // "parameters": { // "name": { - // "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + // "description": "The `name` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). Each\nresource type's `name` value format is described below:\n\n* [`roles.get()`](/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`.\n This method returns results from all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles/{ROLE_NAME}`\n\n* [`projects.roles.get()`](/iam/reference/rest/v1/projects.roles/get):\n `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only\n [custom roles](/iam/docs/understanding-custom-roles) that have been\n created at the project level. 
Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`\n\n* [`organizations.roles.get()`](/iam/reference/rest/v1/organizations.roles/get):\n `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method\n returns only [custom roles](/iam/docs/understanding-custom-roles) that\n have been created at the organization level. Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "path", // "pattern": "^roles/[^/]+$", // "required": true, @@ -7505,12 +7839,48 @@ func (c *RolesListCall) PageToken(pageToken string) *RolesListCall { return c } -// Parent sets the optional parameter "parent": The resource name of the -// parent resource in one of the following formats: -// `` (empty string) -- this refers to curated -// roles. -// `organizations/{ORGANIZATION_ID}` -// `projects/{PROJECT_ID}` +// Parent sets the optional parameter "parent": The `parent` parameter's +// value depends on the target resource for the +// request, +// namely +// [`roles`](/iam/reference/rest/v1/roles), +// [`projects`](/iam/refe +// rence/rest/v1/projects.roles), +// or +// [`organizations`](/iam/reference/rest/v1/organizations.roles). +// Each +// resource type's `parent` value format is described below: +// +// * [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty +// string. +// This method doesn't require a resource; it simply returns all +// [predefined roles](/iam/docs/understanding-roles#predefined_roles) +// in +// Cloud IAM. Example request URL: +// `https://iam.googleapis.com/v1/roles` +// +// * +// [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list): +// +// `projects/{PROJECT_ID}`. This method lists all project-level +// [custom roles](/iam/docs/understanding-custom-roles). +// Example request URL: +// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` +// +// * +// [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.ro +// les/list): +// `organizations/{ORGANIZATION_ID}`. This method lists all +// organization-level [custom +// roles](/iam/docs/understanding-custom-roles). +// Example request URL: +// +// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` +// +// +// Note: Wildcard (*) values are invalid; you must specify a complete +// project +// ID or organization ID. func (c *RolesListCall) Parent(parent string) *RolesListCall { c.urlParams_.Set("parent", parent) return c @@ -7576,6 +7946,7 @@ func (c *RolesListCall) Header() http.Header { func (c *RolesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7652,7 +8023,7 @@ func (c *RolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResponse, er // "type": "string" // }, // "parent": { - // "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + // "description": "The `parent` parameter's value depends on the target resource for the\nrequest, namely\n[`roles`](/iam/reference/rest/v1/roles),\n[`projects`](/iam/reference/rest/v1/projects.roles), or\n[`organizations`](/iam/reference/rest/v1/organizations.roles). 
Each\nresource type's `parent` value format is described below:\n\n* [`roles.list()`](/iam/reference/rest/v1/roles/list): An empty string.\n This method doesn't require a resource; it simply returns all\n [predefined roles](/iam/docs/understanding-roles#predefined_roles) in\n Cloud IAM. Example request URL:\n `https://iam.googleapis.com/v1/roles`\n\n* [`projects.roles.list()`](/iam/reference/rest/v1/projects.roles/list):\n `projects/{PROJECT_ID}`. This method lists all project-level\n [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles`\n\n* [`organizations.roles.list()`](/iam/reference/rest/v1/organizations.roles/list):\n `organizations/{ORGANIZATION_ID}`. This method lists all\n organization-level [custom roles](/iam/docs/understanding-custom-roles).\n Example request URL:\n `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles`\n\nNote: Wildcard (*) values are invalid; you must specify a complete project\nID or organization ID.", // "location": "query", // "type": "string" // }, @@ -7751,6 +8122,7 @@ func (c *RolesQueryGrantableRolesCall) Header() http.Header { func (c *RolesQueryGrantableRolesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 69b8659fdd..a6f9a2dea1 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -1,16 +1,6 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
package internal diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/internal/gensupport/buffer.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/buffer.go rename to vendor/google.golang.org/api/internal/gensupport/buffer.go diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/internal/gensupport/doc.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/doc.go rename to vendor/google.golang.org/api/internal/gensupport/doc.go diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/json.go rename to vendor/google.golang.org/api/internal/gensupport/json.go diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go similarity index 65% rename from vendor/google.golang.org/api/gensupport/jsonfloat.go rename to vendor/google.golang.org/api/internal/gensupport/jsonfloat.go index 8377850811..13c2f93020 100644 --- a/vendor/google.golang.org/api/gensupport/jsonfloat.go +++ b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package gensupport diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/media.go rename to vendor/google.golang.org/api/internal/gensupport/media.go diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/params.go rename to vendor/google.golang.org/api/internal/gensupport/params.go diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go similarity index 74% rename from vendor/google.golang.org/api/gensupport/resumable.go rename to vendor/google.golang.org/api/internal/gensupport/resumable.go index 2552a6acac..e67ccd9a61 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -12,6 +12,22 @@ import ( "net/http" "sync" "time" + + gax "github.com/googleapis/gax-go/v2" +) + +// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their +// own implementation. +type Backoff interface { + Pause() time.Duration +} + +// These are declared as global variables so that tests can overwrite them. 
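The rewritten resumable-upload retry below drops the per-upload BackoffStrategy in favor of a `Backoff` interface plus package-level `backoff` and `retryDeadline` variables, declared that way so tests can substitute their own pause behavior. A minimal sketch of such an override (a hypothetical test in package gensupport, not part of the vendored change):

package gensupport

import (
	"testing"
	"time"
)

// noPause satisfies the new Backoff interface without sleeping, keeping
// retry-path tests fast and deterministic.
type noPause struct{}

func (noPause) Pause() time.Duration { return 0 }

func TestBackoffOverride(t *testing.T) {
	// Swap in the stub and a short deadline, restoring the originals afterwards.
	oldBackoff, oldDeadline := backoff, retryDeadline
	backoff = func() Backoff { return noPause{} }
	retryDeadline = time.Second
	defer func() { backoff, retryDeadline = oldBackoff, oldDeadline }()

	if got := backoff().Pause(); got != 0 {
		t.Fatalf("Pause() = %v, want 0", got)
	}
}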
+var ( + retryDeadline = 32 * time.Second + backoff = func() Backoff { + return &gax.Backoff{Initial: 100 * time.Millisecond} + } ) const ( @@ -39,9 +55,6 @@ type ResumableUpload struct { // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. Callback func(int64) - - // If not specified, a default exponential backoff strategy will be used. - Backoff BackoffStrategy } // Progress returns the number of bytes uploaded at this point. @@ -138,15 +151,6 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e return res, nil } -func contextDone(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - // Upload starts the process of a resumable upload with a cancellable context. // It retries using the provided back off strategy until cancelled or the // strategy indicates to stop retrying. @@ -156,61 +160,82 @@ func contextDone(ctx context.Context) bool { // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var pause time.Duration - backoff := rx.Backoff - if backoff == nil { - backoff = DefaultBackoffStrategy() + var shouldRetry = func(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + return err.Temporary() + } + return false } - for { - // Ensure that we return in the case of cancelled context, even if pause is 0. - if contextDone(ctx) { - return nil, ctx.Err() - } - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(pause): - } - - resp, err = rx.transferChunk(ctx) - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Check if we should retry the request. - if shouldRetry(status, err) { - var retry bool - pause, retry = backoff.Pause() - if retry { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - continue - } - } - - // If the chunk was uploaded successfully, but there's still - // more to go, upload the next chunk without any delay. - if statusResumeIncomplete(resp) { - pause = 0 - backoff.Reset() - resp.Body.Close() - continue - } - - // It's possible for err and resp to both be non-nil here, but we expose a simpler - // contract to our callers: exactly one of resp and err will be non-nil. This means - // that any response body must be closed here before returning a non-nil error. + // There are a couple of cases where it's possible for err and resp to both + // be non-nil. However, we expose a simpler contract to our callers: exactly + // one of resp and err will be non-nil. This means that any response body + // must be closed here before returning a non-nil error. + var prepareReturn = func(resp *http.Response, err error) (*http.Response, error) { if err != nil { if resp != nil && resp.Body != nil { resp.Body.Close() } return nil, err } - return resp, nil } + + // Send all chunks. + for { + var pause time.Duration + + // Each chunk gets its own initialized-at-zero retry. + bo := backoff() + quitAfter := time.After(retryDeadline) + + // Retry loop for a single chunk. 
+ for { + select { + case <-ctx.Done(): + if err == nil { + err = ctx.Err() + } + return prepareReturn(resp, err) + case <-time.After(pause): + case <-quitAfter: + return prepareReturn(resp, err) + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if !shouldRetry(status, err) { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if statusResumeIncomplete(resp) { + resp.Body.Close() + continue + } + + return prepareReturn(resp, err) + } } diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/send.go rename to vendor/google.golang.org/api/internal/gensupport/send.go diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go index a4426dcb70..0680dd9901 100644 --- a/vendor/google.golang.org/api/internal/pool.go +++ b/vendor/google.golang.org/api/internal/pool.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package internal diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 062301c65f..544d715c87 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -1,16 +1,6 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package internal supports the options and transport packages. package internal @@ -27,19 +17,20 @@ import ( // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { - Endpoint string - Scopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. 
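The rewritten Upload loop above drops the old exported BackoffStrategy in favor of a per-chunk gax.Backoff plus a fixed 32-second retry deadline (both declared as package globals so tests can override them), retrying on 5xx, 429, io.ErrUnexpectedEOF, and errors that report Temporary(). Below is a minimal standalone sketch of that per-chunk retry shape; `sendChunk` and `attempt` are illustrative names, not part of the vendored package.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

// shouldRetry mirrors the predicate in the vendored resumable.go: retry on
// 5xx, 429, io.ErrUnexpectedEOF, and errors that report Temporary().
func shouldRetry(status int, err error) bool {
	if (500 <= status && status <= 599) || status == http.StatusTooManyRequests {
		return true
	}
	if err == io.ErrUnexpectedEOF {
		return true
	}
	if t, ok := err.(interface{ Temporary() bool }); ok {
		return t.Temporary()
	}
	return false
}

// sendChunk retries one attempt with gax.Backoff pauses until the result is no
// longer retryable, the context is cancelled, or a fixed deadline (32s,
// matching the vendored default) expires.
func sendChunk(ctx context.Context, attempt func() (int, error)) error {
	bo := &gax.Backoff{Initial: 100 * time.Millisecond}
	quitAfter := time.After(32 * time.Second)

	var pause time.Duration
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-quitAfter:
			return errors.New("chunk retry deadline exceeded")
		case <-time.After(pause):
		}

		status, err := attempt()
		if !shouldRetry(status, err) {
			return err // nil on success
		}
		pause = bo.Pause()
	}
}

func main() {
	tries := 0
	err := sendChunk(context.Background(), func() (int, error) {
		tries++
		if tries < 3 {
			return 503, errors.New("transient 503") // simulated retryable failure
		}
		return 200, nil
	})
	fmt.Println("tries:", tries, "err:", err)
}
```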
- CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - NoAuth bool + Endpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + NoAuth bool + TelemetryDisabled bool // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE similarity index 96% rename from vendor/golang.org/x/sync/LICENSE rename to vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE index 6a66aea5ea..7109c6ef93 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2013 Joshua Tacoma. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA new file mode 100644 index 0000000000..c7f86fcd5f --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA @@ -0,0 +1,14 @@ +name: "uritemplates" +description: + "Package uritemplates is a level 4 implementation of RFC 6570 (URI " + "Template, http://tools.ietf.org/html/rfc6570)." 
+ +third_party { + url { + type: GIT + value: "https://github.com/jtacoma/uritemplates" + } + version: "0.1" + last_upgrade_date { year: 2014 month: 8 day: 18 } + license_type: NOTICE +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go similarity index 98% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go index 63bf053830..8c27d19d75 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go @@ -191,7 +191,7 @@ func parseTerm(term string) (result templateTerm, err error) { err = errors.New("not a valid name: " + result.name) } if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") + err = errors.New("both explode and prefix modifiers on same term") } return result, err } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go similarity index 100% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go index 3c8ea7732a..1799b5d9af 100644 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package iterator provides support for standard Google API iterators. // See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. @@ -82,17 +72,23 @@ type PageInfo struct { // It is not a stable interface. var NewPageInfo = newPageInfo -// If an iterator can support paging, its iterator-creating method should call -// this (via the NewPageInfo variable above). +// newPageInfo creates and returns a PageInfo and a next func. If an iterator can +// support paging, its iterator-creating method should call this. Each time the +// iterator's Next is called, it should call the returned next fn to determine +// whether a next item exists, and if so it should pop an item from the buffer. // -// The fetch, bufLen and takeBuf arguments provide access to the -// iterator's internal slice of buffered items. They behave as described in -// PageInfo, above. +// The fetch, bufLen and takeBuf arguments provide access to the iterator's +// internal slice of buffered items. They behave as described in PageInfo, above. 
// // The return value is the PageInfo.next method bound to the returned PageInfo value. // (Returning it avoids exporting PageInfo.next.) -func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { - pi := &PageInfo{ +// +// Note: the returned PageInfo and next fn do not remove items from the buffer. +// It is up to the iterator using these to remove items from the buffer: +// typically by performing a pop in its Next. If items are not removed from the +// buffer, memory may grow unbounded. +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { + pi = &PageInfo{ fetch: fetch, bufLen: bufLen, takeBuf: takeBuf, diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go index 29cc6fbf12..3351df11bb 100644 --- a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go +++ b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go @@ -53,8 +53,8 @@ import ( "strconv" "strings" - gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" ) @@ -422,6 +422,7 @@ func (c *GetCertForOpenIdConnectCall) Header() http.Header { func (c *GetCertForOpenIdConnectCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -550,6 +551,7 @@ func (c *TokeninfoCall) Header() http.Header { func (c *TokeninfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -682,6 +684,7 @@ func (c *UserinfoGetCall) Header() http.Header { func (c *UserinfoGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -808,6 +811,7 @@ func (c *UserinfoV2MeGetCall) Header() http.Header { func (c *UserinfoV2MeGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go index 0636a82945..d06f918b0e 100644 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build go1.9 diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go index 74d3a4b5b9..0ce107a624 100644 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build !go1.9 diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 0a1c2dba9e..8a4cd166ca 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -1,16 +1,6 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package option contains options for Google API clients. package option @@ -233,3 +223,16 @@ type withRequestReason string func (w withRequestReason) Apply(o *internal.DialSettings) { o.RequestReason = string(w) } + +// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) +// settings on gRPC and HTTP clients. +// An example reason would be to bind custom telemetry that overrides the defaults. 
+func WithTelemetryDisabled() ClientOption { + return withTelemetryDisabledOption{} +} + +type withTelemetryDisabledOption struct{} + +func (w withTelemetryDisabledOption) Apply(o *internal.DialSettings) { + o.TelemetryDisabled = true +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 20b9699e97..6dcb56dcf9 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/p_spPkWHsRi33PRBHlYtU2G_uKg\"", + "etag": "\"F5McR9eEaw0XRpaO3M9gbIugkbs/bQWWH-5yykbmINHZHPMOypW2I3M\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -450,6 +450,13 @@ "required": true, "type": "string" }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, "provisionalUserProject": { "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", "location": "query", @@ -3054,6 +3061,7 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only" ] }, @@ -3196,7 +3204,7 @@ } } }, - "revision": "20190426", + "revision": "20191011", "rootUrl": "https://www.googleapis.com/", "schemas": { "Bucket": { @@ -3292,11 +3300,26 @@ "description": "The bucket's Bucket Policy Only configuration.", "properties": { "enabled": { - "description": "If set, access checks only use bucket-level IAM policies or above.", + "description": "If set, access is controlled only by bucket-level or above IAM policies.", "type": "boolean" }, "lockedTime": { - "description": "The deadline time for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.", + "description": "The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "uniformBucketLevelAccess": { + "description": "The bucket's uniform bucket-level access configuration.", + "properties": { + "enabled": { + "description": "If set, access is controlled only by bucket-level or above IAM policies.", + "type": "boolean" + }, + "lockedTime": { + "description": "The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. 
iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.", "format": "date-time", "type": "string" } @@ -3641,7 +3664,7 @@ }, "kind": { "default": "api#channel", - "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", + "description": "Identifies this as a notification channel used to watch for changes to a resource, which is \"api#channel\".", "type": "string" }, "params": { @@ -3742,11 +3765,6 @@ "description": "Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported.", "type": "string" }, - "kind": { - "default": "storage#expr", - "description": "The kind of item this is. For storage, this is always storage#expr. This field is ignored on input.", - "type": "string" - }, "location": { "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", "type": "string" @@ -4295,6 +4313,11 @@ "resourceId": { "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", "type": "string" + }, + "version": { + "description": "The IAM policy format version.", + "format": "int32", + "type": "integer" } }, "type": "object" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 9924efb866..fd7d9488cd 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -55,8 +55,8 @@ import ( "strconv" "strings" - gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" ) @@ -525,6 +525,10 @@ type BucketIamConfiguration struct { // BucketPolicyOnly: The bucket's Bucket Policy Only configuration. BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` + // UniformBucketLevelAccess: The bucket's uniform bucket-level access + // configuration. + UniformBucketLevelAccess *BucketIamConfigurationUniformBucketLevelAccess `json:"uniformBucketLevelAccess,omitempty"` + // ForceSendFields is a list of field names (e.g. "BucketPolicyOnly") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -552,11 +556,11 @@ func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { // BucketIamConfigurationBucketPolicyOnly: The bucket's Bucket Policy // Only configuration. type BucketIamConfigurationBucketPolicyOnly struct { - // Enabled: If set, access checks only use bucket-level IAM policies or - // above. + // Enabled: If set, access is controlled only by bucket-level or above + // IAM policies. 
Enabled bool `json:"enabled,omitempty"` - // LockedTime: The deadline time for changing + // LockedTime: The deadline for changing // iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC // 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed // from true to false until the locked time, after which the field is @@ -586,6 +590,44 @@ func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform +// bucket-level access configuration. +type BucketIamConfigurationUniformBucketLevelAccess struct { + // Enabled: If set, access is controlled only by bucket-level or above + // IAM policies. + Enabled bool `json:"enabled,omitempty"` + + // LockedTime: The deadline for changing + // iamConfiguration.uniformBucketLevelAccess.enabled from true to false + // in RFC 3339 format. + // iamConfiguration.uniformBucketLevelAccess.enabled may be changed from + // true to false until the locked time, after which the field is + // immutable. + LockedTime string `json:"lockedTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { + type NoMethod BucketIamConfigurationUniformBucketLevelAccess + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketLifecycle: The bucket's lifecycle configuration. See lifecycle // management for more information. type BucketLifecycle struct { @@ -1122,7 +1164,7 @@ type Channel struct { Id string `json:"id,omitempty"` // Kind: Identifies this as a notification channel used to watch for - // changes to a resource. Value: the fixed string "api#channel". + // changes to a resource, which is "api#channel". Kind string `json:"kind,omitempty"` // Params: Additional parameters controlling delivery channel behavior. @@ -1291,10 +1333,6 @@ type Expr struct { // message determines which well-known feature set of CEL is supported. Expression string `json:"expression,omitempty"` - // Kind: The kind of item this is. For storage, this is always - // storage#expr. This field is ignored on input. - Kind string `json:"kind,omitempty"` - // Location: An optional string indicating the location of the // expression for error reporting, e.g. a file name and a position in // the file. @@ -2030,6 +2068,9 @@ type Policy struct { // generation can be denoted with #0. This field is ignored on input. ResourceId string `json:"resourceId,omitempty"` + // Version: The IAM policy format version. 
+ Version int64 `json:"version,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2351,6 +2392,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2498,6 +2540,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2664,6 +2707,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2836,6 +2880,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2996,6 +3041,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3169,6 +3215,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3354,6 +3401,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3532,6 +3580,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3675,6 +3724,16 @@ func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // ProvisionalUserProject sets the optional parameter // "provisionalUserProject": The project to be billed for this request // if the target bucket is requester-pays bucket. 
@@ -3727,6 +3786,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3801,6 +3861,13 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, // "provisionalUserProject": { // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", // "location": "query", @@ -3936,6 +4003,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4192,6 +4260,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4401,6 +4470,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4635,6 +4705,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4863,6 +4934,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5037,6 +5109,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5276,6 +5349,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5487,6 +5561,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5603,6 +5678,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c 
*DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5750,6 +5826,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5917,6 +5994,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6106,6 +6184,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6278,6 +6357,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6451,6 +6531,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6622,6 +6703,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6769,6 +6851,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6938,6 +7021,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7112,6 +7196,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7284,6 +7369,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7455,6 +7541,7 @@ func (c *ObjectAccessControlsGetCall) Header() 
http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7645,6 +7732,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7841,6 +7929,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8025,6 +8114,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8222,6 +8312,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8458,6 +8549,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8789,6 +8881,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9111,6 +9204,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9343,6 +9437,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9592,6 +9687,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9908,6 +10004,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9922,7 +10019,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") if 
c.mediaInfo_ != nil { - urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } if body == nil { @@ -10262,6 +10359,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10568,6 +10666,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10959,6 +11058,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11261,6 +11361,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11460,6 +11561,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11720,6 +11822,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12019,6 +12122,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12224,6 +12328,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12373,6 +12478,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12507,6 +12613,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12602,6 +12709,7 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", 
// "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only" // ] // } @@ -12705,6 +12813,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12899,6 +13008,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13075,6 +13185,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/1.13.4 gdcl/20191114") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go deleted file mode 100644 index c553271190..0000000000 --- a/vendor/google.golang.org/api/support/bundler/bundler.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package bundler supports bundling (batching) of items. Bundling amortizes an -// action with fixed costs over multiple items. For example, if an API provides -// an RPC that accepts a list of items as input, but clients would prefer -// adding items one at a time, then a Bundler can accept individual items from -// the client and bundle many of them into a single RPC. -// -// This package is experimental and subject to change without notice. -package bundler - -import ( - "context" - "errors" - "math" - "reflect" - "sync" - "time" - - "golang.org/x/sync/semaphore" -) - -const ( - DefaultDelayThreshold = time.Second - DefaultBundleCountThreshold = 10 - DefaultBundleByteThreshold = 1e6 // 1M - DefaultBufferedByteLimit = 1e9 // 1G -) - -var ( - // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. - ErrOverflow = errors.New("bundler reached buffered byte limit") - - // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. - ErrOversizedItem = errors.New("item size exceeds bundle byte limit") -) - -// A Bundler collects items added to it into a bundle until the bundle -// exceeds a given size, then calls a user-provided function to handle the bundle. -type Bundler struct { - // Starting from the time that the first message is added to a bundle, once - // this delay has passed, handle the bundle. The default is DefaultDelayThreshold. - DelayThreshold time.Duration - - // Once a bundle has this many items, handle the bundle. 
Since only one - // item at a time is added to a bundle, no bundle will exceed this - // threshold, so it also serves as a limit. The default is - // DefaultBundleCountThreshold. - BundleCountThreshold int - - // Once the number of bytes in current bundle reaches this threshold, handle - // the bundle. The default is DefaultBundleByteThreshold. This triggers handling, - // but does not cap the total size of a bundle. - BundleByteThreshold int - - // The maximum size of a bundle, in bytes. Zero means unlimited. - BundleByteLimit int - - // The maximum number of bytes that the Bundler will keep in memory before - // returning ErrOverflow. The default is DefaultBufferedByteLimit. - BufferedByteLimit int - - // The maximum number of handler invocations that can be running at once. - // The default is 1. - HandlerLimit int - - handler func(interface{}) // called to handle a bundle - itemSliceZero reflect.Value // nil (zero value) for slice of items - flushTimer *time.Timer // implements DelayThreshold - - mu sync.Mutex - sem *semaphore.Weighted // enforces BufferedByteLimit - semOnce sync.Once - curBundle bundle // incoming items added to this bundle - - // Each bundle is assigned a unique ticket that determines the order in which the - // handler is called. The ticket is assigned with mu locked, but waiting for tickets - // to be handled is done via mu2 and cond, below. - nextTicket uint64 // next ticket to be assigned - - mu2 sync.Mutex - cond *sync.Cond - nextHandled uint64 // next ticket to be handled - - // In this implementation, active uses space proportional to HandlerLimit, and - // waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire - // or release occurs, so large values of HandlerLimit max may cause performance - // issues. - active map[uint64]bool // tickets of bundles actively being handled -} - -type bundle struct { - items reflect.Value // slice of item type - size int // size in bytes of all items -} - -// NewBundler creates a new Bundler. -// -// itemExample is a value of the type that will be bundled. For example, if you -// want to create bundles of *Entry, you could pass &Entry{} for itemExample. -// -// handler is a function that will be called on each bundle. If itemExample is -// of type T, the argument to handler is of type []T. handler is always called -// sequentially for each bundle, and never in parallel. -// -// Configure the Bundler by setting its thresholds and limits before calling -// any of its methods. -func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler { - b := &Bundler{ - DelayThreshold: DefaultDelayThreshold, - BundleCountThreshold: DefaultBundleCountThreshold, - BundleByteThreshold: DefaultBundleByteThreshold, - BufferedByteLimit: DefaultBufferedByteLimit, - HandlerLimit: 1, - - handler: handler, - itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))), - active: map[uint64]bool{}, - } - b.curBundle.items = b.itemSliceZero - b.cond = sync.NewCond(&b.mu2) - return b -} - -func (b *Bundler) initSemaphores() { - // Create the semaphores lazily, because the user may set limits - // after NewBundler. - b.semOnce.Do(func() { - b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit)) - }) -} - -// Add adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -// -// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then -// the item can never be handled. 
Add returns ErrOversizedItem in this case. -// -// If adding the item would exceed the maximum memory allowed -// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for -// memory, Add returns ErrOverflow. -// -// Add never blocks. -func (b *Bundler) Add(item interface{}, size int) error { - // If this item exceeds the maximum size of a bundle, - // we can never send it. - if b.BundleByteLimit > 0 && size > b.BundleByteLimit { - return ErrOversizedItem - } - // If adding this item would exceed our allotted memory - // footprint, we can't accept it. - // (TryAcquire also returns false if anything is waiting on the semaphore, - // so calls to Add and AddWait shouldn't be mixed.) - b.initSemaphores() - if !b.sem.TryAcquire(int64(size)) { - return ErrOverflow - } - b.add(item, size) - return nil -} - -// add adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -func (b *Bundler) add(item interface{}, size int) { - b.mu.Lock() - defer b.mu.Unlock() - - // If adding this item to the current bundle would cause it to exceed the - // maximum bundle size, close the current bundle and start a new one. - if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit { - b.startFlushLocked() - } - // Add the item. - b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item)) - b.curBundle.size += size - - // Start a timer to flush the item if one isn't already running. - // startFlushLocked clears the timer and closes the bundle at the same time, - // so we only allocate a new timer for the first item in each bundle. - // (We could try to call Reset on the timer instead, but that would add a lot - // of complexity to the code just to save one small allocation.) - if b.flushTimer == nil { - b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush) - } - - // If the current bundle equals the count threshold, close it. - if b.curBundle.items.Len() == b.BundleCountThreshold { - b.startFlushLocked() - } - // If the current bundle equals or exceeds the byte threshold, close it. - if b.curBundle.size >= b.BundleByteThreshold { - b.startFlushLocked() - } -} - -// AddWait adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -// -// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then -// the item can never be handled. AddWait returns ErrOversizedItem in this case. -// -// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), -// AddWait blocks until space is available or ctx is done. -// -// Calls to Add and AddWait should not be mixed on the same Bundler. -func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { - // If this item exceeds the maximum size of a bundle, - // we can never send it. - if b.BundleByteLimit > 0 && size > b.BundleByteLimit { - return ErrOversizedItem - } - // If adding this item would exceed our allotted memory footprint, block - // until space is available. The semaphore is FIFO, so there will be no - // starvation. - b.initSemaphores() - if err := b.sem.Acquire(ctx, int64(size)); err != nil { - return err - } - // Here, we've reserved space for item. Other goroutines can call AddWait - // and even acquire space, but no one can take away our reservation - // (assuming sem.Release is used correctly). 
So there is no race condition - // resulting from locking the mutex after sem.Acquire returns. - b.add(item, size) - return nil -} - -// Flush invokes the handler for all remaining items in the Bundler and waits -// for it to return. -func (b *Bundler) Flush() { - b.mu.Lock() - b.startFlushLocked() - // Here, all bundles with tickets < b.nextTicket are - // either finished or active. Those are the ones - // we want to wait for. - t := b.nextTicket - b.mu.Unlock() - b.initSemaphores() - b.waitUntilAllHandled(t) -} - -func (b *Bundler) startFlushLocked() { - if b.flushTimer != nil { - b.flushTimer.Stop() - b.flushTimer = nil - } - if b.curBundle.items.Len() == 0 { - return - } - // Here, both semaphores must have been initialized. - bun := b.curBundle - b.curBundle = bundle{items: b.itemSliceZero} - ticket := b.nextTicket - b.nextTicket++ - go func() { - defer func() { - b.sem.Release(int64(bun.size)) - b.release(ticket) - }() - b.acquire(ticket) - b.handler(bun.items.Interface()) - }() -} - -// acquire blocks until ticket is the next to be served, then returns. In order for N -// acquire calls to return, the tickets must be in the range [0, N). A ticket must -// not be presented to acquire more than once. -func (b *Bundler) acquire(ticket uint64) { - b.mu2.Lock() - defer b.mu2.Unlock() - if ticket < b.nextHandled { - panic("bundler: acquire: arg too small") - } - for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) { - b.cond.Wait() - } - // Here, - // ticket == b.nextHandled: the caller is the next one to be handled; - // and len(b.active) < b.HandlerLimit: there is space available. - b.active[ticket] = true - b.nextHandled++ - // Broadcast, not Signal: although at most one acquire waiter can make progress, - // there might be waiters in waitUntilAllHandled. - b.cond.Broadcast() -} - -// If a ticket is used for a call to acquire, it must later be passed to release. A -// ticket must not be presented to release more than once. -func (b *Bundler) release(ticket uint64) { - b.mu2.Lock() - defer b.mu2.Unlock() - if !b.active[ticket] { - panic("bundler: release: not an active ticket") - } - delete(b.active, ticket) - b.cond.Broadcast() -} - -// waitUntilAllHandled blocks until all tickets < n have called release, meaning -// all bundles with tickets < n have been handled. -func (b *Bundler) waitUntilAllHandled(n uint64) { - // Proof of correctness of this function. - // "N is acquired" means acquire(N) has returned. - // "N is released" means release(N) has returned. - // 1. If N is acquired, N-1 is acquired. - // Follows from the loop test in acquire, and the fact - // that nextHandled is incremented by 1. - // 2. If nextHandled >= N, then N-1 is acquired. - // Because we only increment nextHandled to N after N-1 is acquired. - // 3. If nextHandled >= N, then all n < N is acquired. - // Follows from #1 and #2. - // 4. If N is acquired and N is not in active, then N is released. - // Because we put N in active before acquire returns, and only - // remove it when it is released. - // Let min(active) be the smallest member of active, or infinity if active is empty. - // 5. If nextHandled >= N and N <= min(active), then all n < N is released. - // From nextHandled >= N and #3, all n < N is acquired. - // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active. - // So from #4, all n < N is released. - // The loop test below is the antecedent of #5. 
- b.mu2.Lock() - defer b.mu2.Unlock() - for !(b.nextHandled >= n && n <= min(b.active)) { - b.cond.Wait() - } -} - -// min returns the minimum value of the set s, or the largest uint64 if -// s is empty. -func min(s map[uint64]bool) uint64 { - var m uint64 = math.MaxUint64 - for n := range s { - if n < m { - m = n - } - } - return m -} diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go index 1fb7cf905d..2c495ad538 100644 --- a/vendor/google.golang.org/api/transport/dial.go +++ b/vendor/google.golang.org/api/transport/dial.go @@ -1,16 +1,6 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package transport diff --git a/vendor/google.golang.org/api/transport/doc.go b/vendor/google.golang.org/api/transport/doc.go index 4915036c35..7143abee45 100644 --- a/vendor/google.golang.org/api/transport/doc.go +++ b/vendor/google.golang.org/api/transport/doc.go @@ -1,16 +1,6 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package transport provides utility methods for creating authenticated // transports to Google's HTTP and gRPC APIs. It is intended to be used in diff --git a/vendor/google.golang.org/api/transport/go19.go b/vendor/google.golang.org/api/transport/go19.go index 3e89f93287..abaa633f4e 100644 --- a/vendor/google.golang.org/api/transport/go19.go +++ b/vendor/google.golang.org/api/transport/go19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
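For reference against the support/bundler code removed above: Add, AddWait, and Flush are normally driven through a handler callback registered at construction time. The following is a minimal, illustrative usage sketch only; it assumes the package's NewBundler constructor and default field values, which are not part of this diff.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/api/support/bundler"
)

func main() {
	// The handler receives a slice of the item type given to NewBundler
	// (here []int), one call per flushed bundle.
	b := bundler.NewBundler(int(0), func(items interface{}) {
		fmt.Println("flushed bundle:", items.([]int))
	})
	b.DelayThreshold = 100 * time.Millisecond // flush at most 100ms after the first buffered item
	b.BundleCountThreshold = 10               // ...or as soon as 10 items are buffered

	for i := 0; i < 25; i++ {
		// AddWait blocks when BufferedByteLimit would be exceeded;
		// Add would return ErrOverflow instead of blocking.
		if err := b.AddWait(context.Background(), i, 8); err != nil {
			fmt.Println("add failed:", err)
		}
	}
	b.Flush() // hand off any remaining items and wait for the handler to finish
}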
// +build go1.9 diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 4f6b94debb..7526e6820c 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -1,16 +1,6 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package grpc supports network connections to GRPC servers. // This package is not intended for use by end developers. Use the @@ -21,18 +11,28 @@ import ( "context" "errors" "log" + "os" + "strings" "go.opencensus.io/plugin/ocgrpc" + "golang.org/x/oauth2" "google.golang.org/api/internal" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + grpcgoogle "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/credentials/oauth" + + // Install grpclb, which is required for direct path. + _ "google.golang.org/grpc/balancer/grpclb" ) // Set at init time by dial_appengine.go. If nil, we're not on App Engine. var appengineDialerHook func(context.Context) grpc.DialOption +// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. +var timeoutDialerOption grpc.DialOption + // Dial returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { @@ -60,9 +60,7 @@ func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc if o.GRPCConn != nil { return o.GRPCConn, nil } - grpcOpts := []grpc.DialOption{ - grpc.WithWaitForHandshake(), - } + var grpcOpts []grpc.DialOption if insecure { grpcOpts = []grpc.DialOption{grpc.WithInsecure()} } else if !o.NoAuth { @@ -73,31 +71,64 @@ func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc if err != nil { return nil, err } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, - requestReason: o.RequestReason, - }), - grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + // Attempt Direct Path only if: + // * The endpoint is a host:port (or dns:///host:port). + // * Credentials are obtained via GCE metadata server, using the default + // service account. + // * Opted in via GOOGLE_CLOUD_ENABLE_DIRECT_PATH environment variable. 
+ // For example, GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner,pubsub + if isDirectPathEnabled(o.Endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { + if !strings.HasPrefix(o.Endpoint, "dns:///") { + o.Endpoint = "dns:///" + o.Endpoint + } + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle( + grpcgoogle.NewComputeEngineCredentials(), + ), + } + // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. + } else { + grpcOpts = []grpc.DialOption{ + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } } } + if appengineDialerHook != nil { // Use the Socket API on App Engine. + // appengine dialer will override socketopt dialer grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) } + // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts) + grpcOpts = addOCStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) } + + // TODO(weiranf): This socketopt dialer will be used by default at some + // point when isDirectPathEnabled will default to true, we guard it by + // the Directpath env var for now once we can introspect user defined + // dialer (https://github.com/grpc/grpc-go/issues/2795). + if timeoutDialerOption != nil && isDirectPathEnabled(o.Endpoint) { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) } -func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { +func addOCStatsHandler(opts []grpc.DialOption, settings internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } @@ -118,7 +149,7 @@ func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) return nil, err } - // Attach system parameters into the metadata + // Attach system parameter if ts.quotaProject != "" { metadata["X-goog-user-project"] = ts.quotaProject } @@ -127,3 +158,45 @@ func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) } return metadata, nil } + +func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool { + if ts == nil { + return false + } + tok, err := ts.Token() + if err != nil { + return false + } + if tok == nil { + return false + } + if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { + return false + } + if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { + return false + } + return true +} + +func isDirectPathEnabled(endpoint string) bool { + // Only host:port is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). + // + // TODO(cbro): once gRPC has introspectible options, check the user hasn't + // provided a custom dialer in gRPC options. + if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + // Only try direct path if the user has opted in via the environment variable. 
+ whitelist := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",") + for _, api := range whitelist { + // Ignore empty string since an empty env variable splits into [""] + if api != "" && strings.Contains(endpoint, api) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go index 87819d4e10..2c6aef2264 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go +++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build appengine diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go new file mode 100644 index 0000000000..0e4f388968 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -0,0 +1,49 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11,linux + +package grpc + +import ( + "context" + "net" + "syscall" + + "golang.org/x/sys/unix" + "google.golang.org/grpc" +) + +const ( + // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By + // default is 20 seconds. + tcpUserTimeoutMilliseconds = 20000 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. + timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index c0d8bf20b0..1ef67cefb7 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -1,16 +1,6 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package http supports network connections to HTTP servers. // This package is not intended for use by end developers. Use the @@ -70,7 +60,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } - trans = addOCTransport(trans) + trans = addOCTransport(trans, settings) switch { case settings.NoAuth: // Do nothing. @@ -119,16 +109,15 @@ func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) if rt == nil { return nil, errors.New("transport: no Transport specified") } - if t.userAgent == "" { - return rt.RoundTrip(req) - } newReq := *req newReq.Header = make(http.Header) for k, vv := range req.Header { newReq.Header[k] = vv } - // TODO(cbro): append to existing User-Agent header? - newReq.Header.Set("User-Agent", t.userAgent) + if t.userAgent != "" { + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + } // Attach system parameters into the header if t.quotaProject != "" { @@ -153,7 +142,10 @@ func defaultBaseTransport(ctx context.Context) http.RoundTripper { return http.DefaultTransport } -func addOCTransport(trans http.RoundTripper) http.RoundTripper { +func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { + if settings.TelemetryDisabled { + return trans + } return &ochttp.Transport{ Base: trans, Propagation: &propagation.HTTPFormat{}, diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go index 04c81413c5..baee9f27af 100644 --- a/vendor/google.golang.org/api/transport/http/dial_appengine.go +++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
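The TelemetryDisabled checks added to addOCTransport and addOCStatsHandler above let callers opt out of the OpenCensus wrappers entirely. A rough caller-side sketch follows; option.WithTelemetryDisabled is an assumption (it is the ClientOption that sets DialSettings.TelemetryDisabled in google.golang.org/api/option, but verify the name in this vendored revision).

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	"google.golang.org/api/transport"
)

func main() {
	ctx := context.Background()
	// With telemetry disabled, the transport is returned unwrapped instead of
	// being decorated with ochttp/ocgrpc stats handlers.
	// option.WithTelemetryDisabled is assumed here, not confirmed by this diff.
	client, _, err := transport.NewHTTPClient(ctx,
		option.WithTelemetryDisabled(),
		option.WithoutAuthentication(),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use with any google.golang.org/api service client
}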
// +build appengine diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go index 24b4f0d291..fb951bb162 100644 --- a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build go1.8 diff --git a/vendor/google.golang.org/api/transport/not_go19.go b/vendor/google.golang.org/api/transport/not_go19.go index 0cb6275944..657bb6b2e9 100644 --- a/vendor/google.golang.org/api/transport/not_go19.go +++ b/vendor/google.golang.org/api/transport/not_go19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build !go1.9 diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go deleted file mode 100644 index 5b6c587a96..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/api/httpbody.proto - -package httpbody - -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Message that represents an arbitrary HTTP body. It should only be used for -// payload formats that can't be represented as JSON, such as raw binary or -// an HTML page. -// -// -// This message can be used both in streaming and non-streaming API methods in -// the request as well as the response. 
-// -// It can be used as a top-level request field, which is convenient if one -// wants to extract parameters from either the URL or HTTP template into the -// request fields and also want access to the raw HTTP body. -// -// Example: -// -// message GetResourceRequest { -// // A unique request id. -// string request_id = 1; -// -// // The raw HTTP body is bound to this field. -// google.api.HttpBody http_body = 2; -// } -// -// service ResourceService { -// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); -// rpc UpdateResource(google.api.HttpBody) returns -// (google.protobuf.Empty); -// } -// -// Example with streaming methods: -// -// service CaldavService { -// rpc GetCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// rpc UpdateCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// } -// -// Use of this type only changes how the request and response bodies are -// handled, all other features will continue to work unchanged. -type HttpBody struct { - // The HTTP Content-Type header value specifying the content type of the body. - ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` - // The HTTP request/response body as raw binary. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - // Application specific response metadata. Must be set in the first response - // for streaming APIs. - Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HttpBody) Reset() { *m = HttpBody{} } -func (m *HttpBody) String() string { return proto.CompactTextString(m) } -func (*HttpBody) ProtoMessage() {} -func (*HttpBody) Descriptor() ([]byte, []int) { - return fileDescriptor_09ea2ecaa32a0070, []int{0} -} - -func (m *HttpBody) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HttpBody.Unmarshal(m, b) -} -func (m *HttpBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HttpBody.Marshal(b, m, deterministic) -} -func (m *HttpBody) XXX_Merge(src proto.Message) { - xxx_messageInfo_HttpBody.Merge(m, src) -} -func (m *HttpBody) XXX_Size() int { - return xxx_messageInfo_HttpBody.Size(m) -} -func (m *HttpBody) XXX_DiscardUnknown() { - xxx_messageInfo_HttpBody.DiscardUnknown(m) -} - -var xxx_messageInfo_HttpBody proto.InternalMessageInfo - -func (m *HttpBody) GetContentType() string { - if m != nil { - return m.ContentType - } - return "" -} - -func (m *HttpBody) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *HttpBody) GetExtensions() []*any.Any { - if m != nil { - return m.Extensions - } - return nil -} - -func init() { - proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody") -} - -func init() { proto.RegisterFile("google/api/httpbody.proto", fileDescriptor_09ea2ecaa32a0070) } - -var fileDescriptor_09ea2ecaa32a0070 = []byte{ - // 229 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xc3, 0x30, - 0x10, 0x85, 0xe5, 0xb6, 0x42, 0x70, 0x2d, 0x0c, 0x16, 0x43, 0x60, 0x0a, 0x4c, 0x99, 0x6c, 0x09, - 0xd8, 0x3a, 0x35, 0x0b, 0xb0, 0x45, 0x11, 0x13, 0x0b, 0x72, 0x1a, 0xe3, 0x46, 0x2a, 0x77, 0xa7, - 0xe6, 0x10, 0xf8, 0xef, 0xf0, 0x2b, 0x19, 0x11, 0x69, 0x2c, 0xe8, 0xf6, 0xe4, 0xef, 0x3d, 0xbf, - 0x77, 0x70, 0x11, 0x88, 0xc2, 0xd6, 
0x5b, 0xc7, 0x9d, 0xdd, 0x88, 0x70, 0x43, 0x6d, 0x34, 0xbc, - 0x23, 0x21, 0x0d, 0x7b, 0x64, 0x1c, 0x77, 0x97, 0xc9, 0x36, 0x90, 0xe6, 0xfd, 0xd5, 0x3a, 0x1c, - 0x6d, 0xd7, 0x1f, 0x70, 0xfc, 0x20, 0xc2, 0x25, 0xb5, 0x51, 0x5f, 0xc1, 0x62, 0x4d, 0x28, 0x1e, - 0xe5, 0x45, 0x22, 0xfb, 0x4c, 0xe5, 0xaa, 0x38, 0xa9, 0xe7, 0xe3, 0xdb, 0x53, 0x64, 0xaf, 0x35, - 0xcc, 0x5a, 0x27, 0x2e, 0x9b, 0xe4, 0xaa, 0x58, 0xd4, 0x83, 0xd6, 0x77, 0x00, 0xfe, 0x53, 0x3c, - 0xf6, 0x1d, 0x61, 0x9f, 0x4d, 0xf3, 0x69, 0x31, 0xbf, 0x39, 0x37, 0x63, 0x7d, 0xaa, 0x34, 0x2b, - 0x8c, 0xf5, 0x3f, 0x5f, 0xb9, 0x81, 0xb3, 0x35, 0xbd, 0x99, 0xbf, 0x95, 0xe5, 0x69, 0x1a, 0x52, - 0xfd, 0x66, 0x2a, 0xf5, 0xbc, 0x1c, 0x61, 0xa0, 0xad, 0xc3, 0x60, 0x68, 0x17, 0x6c, 0xf0, 0x38, - 0xfc, 0x68, 0xf7, 0xc8, 0x71, 0xd7, 0x1f, 0x1c, 0xbf, 0x4c, 0xe2, 0x5b, 0xa9, 0xaf, 0xc9, 0xec, - 0x7e, 0x55, 0x3d, 0x36, 0x47, 0x43, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, 0xb9, 0x16, - 0x2b, 0x2d, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go new file mode 100644 index 0000000000..78b1c537a8 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -0,0 +1,839 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 // import "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LoadBalanceRequest struct { + // Types that are valid to be assigned to LoadBalanceRequestType: + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } +func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceRequest) ProtoMessage() {} +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{0} +} +func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b) +} +func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic) +} +func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceRequest.Merge(dst, src) +} +func (m *LoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_LoadBalanceRequest.Size(m) +} +func (m *LoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` +} + +type LoadBalanceRequest_ClientStats struct { + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (m *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } +} + +func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialRequest); err != nil { + return err + } + case *LoadBalanceRequest_ClientStats: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientStats); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceRequest) + switch tag { + case 1: // load_balance_request_type.initial_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceRequest) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg} + return true, err + case 2: // load_balance_request_type.client_stats + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientStats) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + s := proto.Size(x.InitialRequest) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceRequest_ClientStats: + s := proto.Size(x.ClientStats) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceRequest struct { + // The name of the load balanced service (e.g., service.googleapis.com). Its + // length should be less than 256 bytes. + // The name might include a port number. How to handle the port number is up + // to the balancer. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } +func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceRequest) ProtoMessage() {} +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{1} +} +func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b) +} +func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic) +} +func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src) +} +func (m *InitialLoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceRequest.Size(m) +} +func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Contains the number of calls finished for a particular load balance token. +type ClientStatsPerToken struct { + // See Server.load_balance_token. + LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // The total number of RPCs that finished associated with the token. + NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStatsPerToken) Reset() { *m = ClientStatsPerToken{} } +func (m *ClientStatsPerToken) String() string { return proto.CompactTextString(m) } +func (*ClientStatsPerToken) ProtoMessage() {} +func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{2} +} +func (m *ClientStatsPerToken) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStatsPerToken.Unmarshal(m, b) +} +func (m *ClientStatsPerToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStatsPerToken.Marshal(b, m, deterministic) +} +func (dst *ClientStatsPerToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStatsPerToken.Merge(dst, src) +} +func (m *ClientStatsPerToken) XXX_Size() int { + return xxx_messageInfo_ClientStatsPerToken.Size(m) +} +func (m *ClientStatsPerToken) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStatsPerToken.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStatsPerToken proto.InternalMessageInfo + +func (m *ClientStatsPerToken) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *ClientStatsPerToken) GetNumCalls() int64 { + if m != nil { + return m.NumCalls + } + return 0 +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + // The timestamp of generating the report. 
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The total number of RPCs that started. + NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. + NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` + // The list of dropped calls. + CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{3} +} +func (m *ClientStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStats.Unmarshal(m, b) +} +func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) +} +func (dst *ClientStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStats.Merge(dst, src) +} +func (m *ClientStats) XXX_Size() int { + return xxx_messageInfo_ClientStats.Size(m) +} +func (m *ClientStats) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStats proto.InternalMessageInfo + +func (m *ClientStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} + +func (m *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { + if m != nil { + return m.CallsFinishedWithDrop + } + return nil +} + +type LoadBalanceResponse struct { + // Types that are valid to be assigned to LoadBalanceResponseType: + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } +func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceResponse) ProtoMessage() {} +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{4} +} +func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b) +} +func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic) +} +func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceResponse.Merge(dst, src) +} +func (m *LoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_LoadBalanceResponse.Size(m) +} +func (m *LoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` +} + +type LoadBalanceResponse_ServerList struct { + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (m *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + } +} + +func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialResponse); err != nil { + return err + } + case *LoadBalanceResponse_ServerList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceResponse) + switch tag { + case 1: // load_balance_response_type.initial_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceResponse) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg} + return true, err + case 2: // load_balance_response_type.server_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerList) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + s := proto.Size(x.InitialResponse) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceResponse_ServerList: + s := proto.Size(x.ServerList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceResponse struct { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate,proto3" json:"load_balancer_delegate,omitempty"` + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. 
+ ClientStatsReportInterval *duration.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } +func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceResponse) ProtoMessage() {} +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{5} +} +func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b) +} +func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic) +} +func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src) +} +func (m *InitialLoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceResponse.Size(m) +} +func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} + +func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *duration.Duration { + if m != nil { + return m.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerList) Reset() { *m = ServerList{} } +func (m *ServerList) String() string { return proto.CompactTextString(m) } +func (*ServerList) ProtoMessage() {} +func (*ServerList) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{6} +} +func (m *ServerList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerList.Unmarshal(m, b) +} +func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerList.Marshal(b, m, deterministic) +} +func (dst *ServerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerList.Merge(dst, src) +} +func (m *ServerList) XXX_Size() int { + return xxx_messageInfo_ServerList.Size(m) +} +func (m *ServerList) XXX_DiscardUnknown() { + xxx_messageInfo_ServerList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerList proto.InternalMessageInfo + +func (m *ServerList) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +// Contains server information. When the drop field is not true, use the other +// fields. +type Server struct { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. 
+ IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // An opaque but printable token for load reporting. The client must include + // the token of the picked server into the initial metadata when it starts a + // call to that server. The token is used by the server to verify the request + // and to allow the server to report load to the gRPC LB system. The token is + // also used in client stats for reporting dropped calls. + // + // Its length can be variable but must be less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client. + // If the request is dropped, there will be a corresponding entry in + // ClientStats.calls_finished_with_drop. + Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{7} +} +func (m *Server) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Server.Unmarshal(m, b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Server.Marshal(b, m, deterministic) +} +func (dst *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(dst, src) +} +func (m *Server) XXX_Size() int { + return xxx_messageInfo_Server.Size(m) +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress + } + return nil +} + +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *Server) GetDrop() bool { + if m != nil { + return m.Drop + } + return false +} + +func init() { + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStatsPerToken)(nil), "grpc.lb.v1.ClientStatsPerToken") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LoadBalancerClient is the client API for LoadBalancer service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := c.cc.NewStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*LoadBalanceRequest) error + Recv() (*LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { + m := new(LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadBalancerServer is the server API for LoadBalancer service. +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { + s.RegisterService(&_LoadBalancer_serviceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*LoadBalanceResponse) error + Recv() (*LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { + m := new(LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/lb/v1/load_balancer.proto", +} + +func init() { + proto.RegisterFile("grpc/lb/v1/load_balancer.proto", fileDescriptor_load_balancer_12026aec3f0251ba) +} + +var fileDescriptor_load_balancer_12026aec3f0251ba = []byte{ + // 752 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xdd, 0x6e, 0x23, 0x35, + 0x14, 0xee, 0x90, 0x69, 0x36, 0x39, 0x29, 0x34, 0xeb, 0x85, 0x65, 0x92, 0xdd, 0x6d, 0x4b, 0x24, + 0x56, 0x11, 0x2a, 0x13, 0x52, 0xb8, 0x00, 0x89, 0x0b, 0x48, 0xab, 0x2a, 0x2d, 0xbd, 0x88, 0x9c, + 0x4a, 0x45, 0x95, 0x90, 0x99, 0xc9, 0xb8, 0xa9, 0x55, 0xc7, 0x1e, 0x3c, 0x4e, 0x2a, 0xae, 0x79, + 0x1f, 0xc4, 0x2b, 0x20, 0x5e, 0x0c, 0x8d, 0xed, 0x49, 0xa6, 0x49, 
0xa3, 0xbd, 0xca, 0xf8, 0x9c, + 0xcf, 0xdf, 0xf9, 0xfd, 0x1c, 0x38, 0x98, 0xaa, 0x74, 0xd2, 0xe3, 0x71, 0x6f, 0xd1, 0xef, 0x71, + 0x19, 0x25, 0x24, 0x8e, 0x78, 0x24, 0x26, 0x54, 0x85, 0xa9, 0x92, 0x5a, 0x22, 0xc8, 0xfd, 0x21, + 0x8f, 0xc3, 0x45, 0xbf, 0x7d, 0x30, 0x95, 0x72, 0xca, 0x69, 0xcf, 0x78, 0xe2, 0xf9, 0x5d, 0x2f, + 0x99, 0xab, 0x48, 0x33, 0x29, 0x2c, 0xb6, 0x7d, 0xb8, 0xee, 0xd7, 0x6c, 0x46, 0x33, 0x1d, 0xcd, + 0x52, 0x0b, 0xe8, 0xfc, 0xeb, 0x01, 0xba, 0x92, 0x51, 0x32, 0xb0, 0x31, 0x30, 0xfd, 0x63, 0x4e, + 0x33, 0x8d, 0x46, 0xb0, 0xcf, 0x04, 0xd3, 0x2c, 0xe2, 0x44, 0x59, 0x53, 0xe0, 0x1d, 0x79, 0xdd, + 0xc6, 0xc9, 0x97, 0xe1, 0x2a, 0x7a, 0x78, 0x61, 0x21, 0x9b, 0xf7, 0x87, 0x3b, 0xf8, 0x13, 0x77, + 0xbf, 0x60, 0xfc, 0x11, 0xf6, 0x26, 0x9c, 0x51, 0xa1, 0x49, 0xa6, 0x23, 0x9d, 0x05, 0x1f, 0x19, + 0xba, 0xcf, 0xcb, 0x74, 0xa7, 0xc6, 0x3f, 0xce, 0xdd, 0xc3, 0x1d, 0xdc, 0x98, 0xac, 0x8e, 0x83, + 0x37, 0xd0, 0x2a, 0xb7, 0xa2, 0x48, 0x8a, 0xe8, 0x3f, 0x53, 0xda, 0xe9, 0x41, 0x6b, 0x6b, 0x26, + 0x08, 0x81, 0x2f, 0xa2, 0x19, 0x35, 0xe9, 0xd7, 0xb1, 0xf9, 0xee, 0xfc, 0x0e, 0xaf, 0x4a, 0xb1, + 0x46, 0x54, 0x5d, 0xcb, 0x07, 0x2a, 0xd0, 0x31, 0xa0, 0x27, 0x41, 0x74, 0x6e, 0x75, 0x17, 0x9b, + 0x7c, 0x45, 0x6d, 0xd1, 0x6f, 0xa0, 0x2e, 0xe6, 0x33, 0x32, 0x89, 0x38, 0xb7, 0xd5, 0x54, 0x70, + 0x4d, 0xcc, 0x67, 0xa7, 0xf9, 0xb9, 0xf3, 0x4f, 0x05, 0x1a, 0xa5, 0x10, 0xe8, 0x7b, 0xa8, 0x2f, + 0x3b, 0xef, 0x3a, 0xd9, 0x0e, 0xed, 0x6c, 0xc2, 0x62, 0x36, 0xe1, 0x75, 0x81, 0xc0, 0x2b, 0x30, + 0xfa, 0x0a, 0x5e, 0x2e, 0xc3, 0xe4, 0xad, 0x53, 0x9a, 0x26, 0x2e, 0xdc, 0x7e, 0x11, 0x6e, 0x6c, + 0xcd, 0x79, 0x01, 0x2b, 0xec, 0x1d, 0x13, 0x2c, 0xbb, 0xa7, 0x49, 0x50, 0x31, 0xe0, 0x66, 0x01, + 0x3e, 0x77, 0x76, 0xf4, 0x1b, 0x7c, 0xbd, 0x89, 0x26, 0x8f, 0x4c, 0xdf, 0x13, 0x37, 0xa9, 0xbb, + 0x88, 0x71, 0x9a, 0x10, 0x2d, 0x49, 0x46, 0x45, 0x12, 0x54, 0x0d, 0xd1, 0xfb, 0x75, 0xa2, 0x1b, + 0xa6, 0xef, 0x6d, 0xad, 0xe7, 0x06, 0x7f, 0x2d, 0xc7, 0x54, 0x24, 0x68, 0x08, 0x5f, 0x3c, 0x43, + 0xff, 0x20, 0xe4, 0xa3, 0x20, 0x8a, 0x4e, 0x28, 0x5b, 0xd0, 0x24, 0x78, 0x61, 0x28, 0xdf, 0xad, + 0x53, 0xfe, 0x92, 0xa3, 0xb0, 0x03, 0xa1, 0x5f, 0x21, 0x78, 0x2e, 0xc9, 0x44, 0xc9, 0x34, 0xa8, + 0x1d, 0x55, 0xba, 0x8d, 0x93, 0xc3, 0x2d, 0x6b, 0x54, 0x8c, 0x16, 0x7f, 0x36, 0x59, 0xcf, 0xf8, + 0x4c, 0xc9, 0xf4, 0xd2, 0xaf, 0xf9, 0xcd, 0xdd, 0x4b, 0xbf, 0xb6, 0xdb, 0xac, 0x76, 0xfe, 0xf3, + 0xe0, 0xd5, 0x93, 0xfd, 0xc9, 0x52, 0x29, 0x32, 0x8a, 0xc6, 0xd0, 0x5c, 0x49, 0xc1, 0xda, 0xdc, + 0x04, 0xdf, 0x7f, 0x48, 0x0b, 0x16, 0x3d, 0xdc, 0xc1, 0xfb, 0x4b, 0x31, 0x38, 0xd2, 0x1f, 0xa0, + 0x91, 0x51, 0xb5, 0xa0, 0x8a, 0x70, 0x96, 0x69, 0x27, 0x86, 0xd7, 0x65, 0xbe, 0xb1, 0x71, 0x5f, + 0x31, 0x23, 0x26, 0xc8, 0x96, 0xa7, 0xc1, 0x5b, 0x68, 0xaf, 0x49, 0xc1, 0x72, 0x5a, 0x2d, 0xfc, + 0xed, 0x41, 0x7b, 0x7b, 0x2a, 0xe8, 0x3b, 0x78, 0xfd, 0xe4, 0x49, 0x21, 0x09, 0xe5, 0x74, 0x1a, + 0xe9, 0x42, 0x1f, 0x9f, 0x96, 0xd6, 0x5c, 0x9d, 0x39, 0x1f, 0xba, 0x85, 0xb7, 0x65, 0xed, 0x12, + 0x45, 0x53, 0xa9, 0x34, 0x61, 0x42, 0x53, 0xb5, 0x88, 0xb8, 0x4b, 0xbf, 0xb5, 0xb1, 0xd0, 0x67, + 0xee, 0x31, 0xc2, 0xad, 0x92, 0x96, 0xb1, 0xb9, 0x7c, 0xe1, 0xee, 0x76, 0x7e, 0x02, 0x58, 0x95, + 0x8a, 0x8e, 0xe1, 0x85, 0x2d, 0x35, 0x0b, 0x3c, 0x33, 0x59, 0xb4, 0xd9, 0x13, 0x5c, 0x40, 0x2e, + 0xfd, 0x5a, 0xa5, 0xe9, 0x77, 0xfe, 0xf2, 0xa0, 0x6a, 0x3d, 0xe8, 0x1d, 0x00, 0x4b, 0x49, 0x94, + 0x24, 0x8a, 0x66, 0x99, 0x29, 0x69, 0x0f, 0xd7, 0x59, 0xfa, 0xb3, 0x35, 0xe4, 0x6f, 0x41, 0x1e, + 0xdb, 0xe4, 0xbb, 0x8b, 0xcd, 0xf7, 0x16, 0xd1, 0x57, 0xb6, 0x88, 0x1e, 0x81, 0x6f, 0xd6, 
0xce, + 0x3f, 0xf2, 0xba, 0x35, 0x6c, 0xbe, 0xed, 0xfa, 0x9c, 0xc4, 0xb0, 0x57, 0x6a, 0xb8, 0x42, 0x18, + 0x1a, 0xee, 0x3b, 0x37, 0xa3, 0x83, 0x72, 0x1d, 0x9b, 0xcf, 0x54, 0xfb, 0x70, 0xab, 0xdf, 0x4e, + 0xae, 0xeb, 0x7d, 0xe3, 0x0d, 0x6e, 0xe0, 0x63, 0x26, 0x4b, 0xc0, 0xc1, 0xcb, 0x72, 0xc8, 0x51, + 0xde, 0xf6, 0x91, 0x77, 0xdb, 0x77, 0x63, 0x98, 0x4a, 0x1e, 0x89, 0x69, 0x28, 0xd5, 0xb4, 0x67, + 0xfe, 0x51, 0x8a, 0x99, 0x9b, 0x13, 0x8f, 0xcd, 0x0f, 0xe1, 0x31, 0x59, 0xf4, 0xe3, 0xaa, 0x19, + 0xd9, 0xb7, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x81, 0x14, 0xee, 0xd1, 0x7b, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go new file mode 100644 index 0000000000..d881a9211b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -0,0 +1,485 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package grpclb defines a grpclb balancer. +// +// To install grpclb balancer, import this package as: +// import _ "google.golang.org/grpc/balancer/grpclb" +package grpclb + +import ( + "context" + "errors" + "strconv" + "sync" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/resolver" +) + +const ( + lbTokeyKey = "lb-token" + defaultFallbackTimeout = 10 * time.Second + grpclbName = "grpclb" +) + +var ( + // defaultBackoffConfig configures the backoff strategy that's used when the + // init handshake in the RPC is unsuccessful. It's not for the clientconn + // reconnect backoff. + // + // It has the same value as the default grpc.DefaultBackoffConfig. + // + // TODO: make backoff configurable. + defaultBackoffConfig = backoff.Exponential{ + MaxDelay: 120 * time.Second, + } + errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") +) + +func convertDuration(d *durationpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { + desc := &grpc.StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
+ if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + grpc.ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func init() { + balancer.Register(newLBBuilder()) +} + +// newLBBuilder creates a builder for grpclb. +func newLBBuilder() balancer.Builder { + return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) +} + +// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given +// fallbackTimeout. If no response is received from the remote balancer within +// fallbackTimeout, the backend addresses from the resolved address list will be +// used. +// +// Only call this function when a non-default fallback timeout is needed. +func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { + return &lbBuilder{ + fallbackTimeout: fallbackTimeout, + } +} + +type lbBuilder struct { + fallbackTimeout time.Duration +} + +func (b *lbBuilder) Name() string { + return grpclbName +} + +func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + // This generates a manual resolver builder with a random scheme. This + // scheme will be used to dial to remote LB, so we can send filtered address + // updates to remote LB ClientConn using this manual resolver. + scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36) + r := &lbManualResolver{scheme: scheme, ccb: cc} + + lb := &lbBalancer{ + cc: newLBCacheClientConn(cc), + target: opt.Target.Endpoint, + opt: opt, + fallbackTimeout: b.fallbackTimeout, + doneCh: make(chan struct{}), + + manualResolver: r, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + clientStats: newRPCStats(), + backoff: defaultBackoffConfig, // TODO: make backoff configurable. + } + + var err error + if opt.CredsBundle != nil { + lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + } + lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + } + } + + return lb +} + +type lbBalancer struct { + cc *lbCacheClientConn + target string + opt balancer.BuildOptions + + usePickFirst bool + + // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb + // servers. If it's nil, use the TransportCredentials from BuildOptions + // instead. + grpclbClientConnCreds credentials.Bundle + // grpclbBackendCreds is the creds bundle to be used for addresses that are + // returned by grpclb server. If it's nil, don't set anything when creating + // SubConns. + grpclbBackendCreds credentials.Bundle + + fallbackTimeout time.Duration + doneCh chan struct{} + + // manualResolver is used in the remote LB ClientConn inside grpclb. When + // resolved address updates are received by grpclb, filtered updates will be + // send to remote LB ClientConn through this resolver. 
+ manualResolver *lbManualResolver + // The ClientConn to talk to the remote balancer. + ccRemoteLB *grpc.ClientConn + // backoff for calling remote balancer. + backoff backoff.Strategy + + // Support client side load reporting. Each picker gets a reference to this, + // and will update its content. + clientStats *rpcStats + + mu sync.Mutex // guards everything following. + // The full server list including drops, used to check if the newly received + // serverList contains anything new. Each generate picker will also have + // reference to this list to do the first layer pick. + fullServerList []*lbpb.Server + // Backend addresses. It's kept so the addresses are available when + // switching between round_robin and pickfirst. + backendAddrs []resolver.Address + // All backends addresses, with metadata set to nil. This list contains all + // backend addresses in the same order and with the same duplicates as in + // serverlist. When generating picker, a SubConn slice with the same order + // but with only READY SCs will be gerenated. + backendAddrsWithoutMetadata []resolver.Address + // Roundrobin functionalities. + state connectivity.State + subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. + scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. + picker balancer.Picker + // Support fallback to resolved backend addresses if there's no response + // from remote balancer within fallbackTimeout. + remoteBalancerConnected bool + serverListReceived bool + inFallback bool + // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set + // when resolved address updates are received, and read in the goroutine + // handling fallback. + resolvedBackendAddrs []resolver.Address +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker from +// it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// Caller must hold lb.mu. +func (lb *lbBalancer) regeneratePicker(resetDrop bool) { + if lb.state == connectivity.TransientFailure { + lb.picker = &errPicker{err: balancer.ErrTransientFailure} + return + } + + if lb.state == connectivity.Connecting { + lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + return + } + + var readySCs []balancer.SubConn + if lb.usePickFirst { + for _, sc := range lb.subConns { + readySCs = append(readySCs, sc) + break + } + } else { + for _, a := range lb.backendAddrsWithoutMetadata { + if sc, ok := lb.subConns[a]; ok { + if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + } + } + + if len(readySCs) <= 0 { + // If there's no ready SubConns, always re-pick. This is to avoid drops + // unless at least one SubConn is ready. Otherwise we may drop more + // often than want because of drops + re-picks(which become re-drops). + // + // This doesn't seem to be necessary after the connecting check above. + // Kept for safety. + lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + return + } + if lb.inFallback { + lb.picker = newRRPicker(readySCs) + return + } + if resetDrop { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker, ok := lb.picker.(*lbPicker) + if !ok { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker.updateReadySCs(readySCs) +} + +// aggregateSubConnStats calculate the aggregated state of SubConns in +// lb.SubConns. 
These SubConns are subconns in use (when switching between +// fallback and grpclb). lb.scState contains states for all SubConns, including +// those in cache (SubConns are cached for 10 seconds after remove). +// +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { + var numConnecting uint64 + + for _, sc := range lb.subConns { + if state, ok := lb.scStates[sc]; ok { + switch state { + case connectivity.Ready: + return connectivity.Ready + case connectivity.Connecting: + numConnecting++ + } + } + } + if numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} + +func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not used") +} + +func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + s := scs.ConnectivityState + if grpclog.V(2) { + grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + oldS, ok := lb.scStates[sc] + if !ok { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + lb.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(lb.scStates, sc) + } + // Force regenerate picker if + // - this sc became ready from not-ready + // - this sc became not-ready from ready + lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) + + // Enter fallback when the aggregated state is not Ready and the connection + // to remote balancer is lost. + if lb.state != connectivity.Ready { + if !lb.inFallback && !lb.remoteBalancerConnected { + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + } +} + +// updateStateAndPicker re-calculate the aggregated state, and regenerate picker +// if overall state is changed. +// +// If forceRegeneratePicker is true, picker will be regenerated. +func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { + oldAggrState := lb.state + lb.state = lb.aggregateSubConnStates() + // Regenerate picker when one of the following happens: + // - caller wants to regenerate + // - the aggregated state changed + if forceRegeneratePicker || (lb.state != oldAggrState) { + lb.regeneratePicker(resetDrop) + } + + lb.cc.UpdateBalancerState(lb.state, lb.picker) +} + +// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use +// resolved backends (backends received from resolver, not from remote balancer) +// if no connection to remote balancers was successful. +func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { + timer := time.NewTimer(fallbackTimeout) + defer timer.Stop() + select { + case <-timer.C: + case <-lb.doneCh: + return + } + lb.mu.Lock() + if lb.inFallback || lb.serverListReceived { + lb.mu.Unlock() + return + } + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + lb.mu.Unlock() +} + +// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB +// clientConn. 
The remoteLB clientConn will handle creating/removing remoteLB +// connections. +func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not used") +} + +func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { + lb.mu.Lock() + defer lb.mu.Unlock() + + newUsePickFirst := childIsPickFirst(gc) + if lb.usePickFirst == newUsePickFirst { + return + } + if grpclog.V(2) { + grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + } + lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) +} + +func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + } + gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) + lb.handleServiceConfig(gc) + + addrs := ccs.ResolverState.Addresses + if len(addrs) <= 0 { + return + } + + var remoteBalancerAddrs, backendAddrs []resolver.Address + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + a.Type = resolver.Backend + remoteBalancerAddrs = append(remoteBalancerAddrs, a) + } else { + backendAddrs = append(backendAddrs, a) + } + } + + if lb.ccRemoteLB == nil { + if len(remoteBalancerAddrs) <= 0 { + grpclog.Errorf("grpclb: no remote balancer address is available, should never happen") + return + } + // First time receiving resolved addresses, create a cc to remote + // balancers. + lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName) + // Start the fallback goroutine. + go lb.fallbackToBackendsAfter(lb.fallbackTimeout) + } + + // cc to remote balancers uses lb.manualResolver. Send the updated remote + // balancer addresses to it through manualResolver. + lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) + + lb.mu.Lock() + lb.resolvedBackendAddrs = backendAddrs + if lb.inFallback { + // This means we received a new list of resolved backends, and we are + // still in fallback mode. Need to update the list of backends we are + // using to the new list of backends. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() +} + +func (lb *lbBalancer) Close() { + select { + case <-lb.doneCh: + return + default: + } + close(lb.doneCh) + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.Close() + } + lb.cc.close() +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go new file mode 100644 index 0000000000..aac3719631 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclb + +import ( + "encoding/json" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/serviceconfig" +) + +const ( + roundRobinName = roundrobin.Name + pickFirstName = grpc.PickFirstBalancerName +) + +type grpclbServiceConfig struct { + serviceconfig.LoadBalancingConfig + ChildPolicy *[]map[string]json.RawMessage +} + +func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + ret := &grpclbServiceConfig{} + if err := json.Unmarshal(lbConfig, ret); err != nil { + return nil, err + } + return ret, nil +} + +func childIsPickFirst(sc *grpclbServiceConfig) bool { + if sc == nil { + return false + } + childConfigs := sc.ChildPolicy + if childConfigs == nil { + return false + } + for _, childC := range *childConfigs { + // If round_robin exists before pick_first, return false + if _, ok := childC[roundRobinName]; ok { + return false + } + // If pick_first is before round_robin, return true + if _, ok := childC[pickFirstName]; ok { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go new file mode 100644 index 0000000000..6f023bc5ee --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "context" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/status" +) + +// rpcStats is same as lbmpb.ClientStats, except that numCallsDropped is a map +// instead of a slice. +type rpcStats struct { + // Only access the following fields atomically. + numCallsStarted int64 + numCallsFinished int64 + numCallsFinishedWithClientFailedToSend int64 + numCallsFinishedKnownReceived int64 + + mu sync.Mutex + // map load_balance_token -> num_calls_dropped + numCallsDropped map[string]int64 +} + +func newRPCStats() *rpcStats { + return &rpcStats{ + numCallsDropped: make(map[string]int64), + } +} + +// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. 
+func (s *rpcStats) toClientStats() *lbpb.ClientStats { + stats := &lbpb.ClientStats{ + NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), + NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), + NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), + NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), + } + s.mu.Lock() + dropped := s.numCallsDropped + s.numCallsDropped = make(map[string]int64) + s.mu.Unlock() + for token, count := range dropped { + stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ + LoadBalanceToken: token, + NumCalls: count, + }) + } + return stats +} + +func (s *rpcStats) drop(token string) { + atomic.AddInt64(&s.numCallsStarted, 1) + s.mu.Lock() + s.numCallsDropped[token]++ + s.mu.Unlock() + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) failedToSend() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) knownReceived() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +type errPicker struct { + // Pick always returns this err. + err error +} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// rrPicker does roundrobin on subConns. It's typically used when there's no +// response from remote balancer, and grpclb falls back to the resolved +// backends. +// +// It guaranteed that len(subConns) > 0. +type rrPicker struct { + mu sync.Mutex + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int +} + +func newRRPicker(readySCs []balancer.SubConn) *rrPicker { + return &rrPicker{ + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + } +} + +func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + defer p.mu.Unlock() + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + return sc, nil, nil +} + +// lbPicker does two layers of picks: +// +// First layer: roundrobin on all servers in serverList, including drops and backends. +// - If it picks a drop, the RPC will fail as being dropped. +// - If it picks a backend, do a second layer pick to pick the real backend. +// +// Second layer: roundrobin on all READY backends. +// +// It's guaranteed that len(serverList) > 0. +type lbPicker struct { + mu sync.Mutex + serverList []*lbpb.Server + serverListNext int + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int + + stats *rpcStats +} + +func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { + return &lbPicker{ + serverList: serverList, + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + stats: stats, + } +} + +func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Layer one roundrobin on serverList. + s := p.serverList[p.serverListNext] + p.serverListNext = (p.serverListNext + 1) % len(p.serverList) + + // If it's a drop, return an error and fail the RPC. 
+ if s.Drop { + p.stats.drop(s.LoadBalanceToken) + return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + + // If not a drop but there's no ready subConns. + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + // Return the next ready subConn in the list, also collect rpc stats. + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + done := func(info balancer.DoneInfo) { + if !info.BytesSent { + p.stats.failedToSend() + } else if info.BytesReceived { + p.stats.knownReceived() + } + } + return sc, done, nil +} + +func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { + p.mu.Lock() + defer p.mu.Unlock() + + p.subConns = readySCs + p.subConnsNext = p.subConnsNext % len(readySCs) +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go new file mode 100644 index 0000000000..86320bff69 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -0,0 +1,353 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "context" + "fmt" + "io" + "net" + "reflect" + "time" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// processServerList updates balaner's internal state, create/remove SubConns +// and regenerates picker using the received serverList. +func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: processing server list: %+v", l) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + // Set serverListReceived to true so fallback will not take effect if it has + // not hit timeout. + lb.serverListReceived = true + + // If the new server list == old server list, do nothing. + if reflect.DeepEqual(lb.fullServerList, l.Servers) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + } + return + } + lb.fullServerList = l.Servers + + var backendAddrs []resolver.Address + for i, s := range l.Servers { + if s.Drop { + continue + } + + md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. 
+ ipStr = fmt.Sprintf("[%s]", ipStr) + } + addr := resolver.Address{ + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), + Metadata: &md, + } + if grpclog.V(2) { + grpclog.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", + i, ipStr, s.Port, s.LoadBalanceToken) + } + backendAddrs = append(backendAddrs, addr) + } + + // Call refreshSubConns to create/remove SubConns. If we are in fallback, + // this is also exiting fallback. + lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) +} + +// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes +// balancer state and picker. +// +// Caller must hold lb.mu. +func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { + lb.inFallback = fallback + + opts := balancer.NewSubConnOptions{} + if !fallback { + opts.CredsBundle = lb.grpclbBackendCreds + } + + lb.backendAddrs = backendAddrs + lb.backendAddrsWithoutMetadata = nil + + if lb.usePickFirst != pickFirst { + // Remove all SubConns when switching modes. + for a, sc := range lb.subConns { + if lb.usePickFirst { + lb.cc.cc.RemoveSubConn(sc) + } else { + lb.cc.RemoveSubConn(sc) + } + delete(lb.subConns, a) + } + lb.usePickFirst = pickFirst + } + + if lb.usePickFirst { + var sc balancer.SubConn + for _, sc = range lb.subConns { + break + } + if sc != nil { + sc.UpdateAddresses(backendAddrs) + sc.Connect() + return + } + // This bypasses the cc wrapper with SubConn cache. + sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + if err != nil { + grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + return + } + sc.Connect() + lb.subConns[backendAddrs[0]] = sc + lb.scStates[sc] = connectivity.Idle + return + } + + // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used to quick + // lookup for an address. + addrsSet := make(map[resolver.Address]struct{}) + // Create new SubConns. + for _, addr := range backendAddrs { + addrWithoutMD := addr + addrWithoutMD.Metadata = nil + addrsSet[addrWithoutMD] = struct{}{} + lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutMD) + + if _, ok := lb.subConns[addrWithoutMD]; !ok { + // Use addrWithMD to create the SubConn. + sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) + if err != nil { + grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + continue + } + lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. + if _, ok := lb.scStates[sc]; !ok { + // Only set state of new sc to IDLE. The state could already be + // READY for cached SubConns. + lb.scStates[sc] = connectivity.Idle + } + sc.Connect() + } + } + + for a, sc := range lb.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + lb.cc.RemoveSubConn(sc) + delete(lb.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } + + // Regenerate and update picker after refreshing subconns because with + // cache, even if SubConn was newed/removed, there might be no state + // changes (the subconn will be kept in cache, not actually + // newed/removed). 
+ lb.updateStateAndPicker(true, true) +} + +func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error { + for { + reply, err := s.Recv() + if err != nil { + if err == io.EOF { + return errServerTerminatedConnection + } + return fmt.Errorf("grpclb: failed to recv server list: %v", err) + } + if serverList := reply.GetServerList(); serverList != nil { + lb.processServerList(serverList) + } + } +} + +func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + case <-s.Context().Done(): + return + } + stats := lb.clientStats.toClientStats() + t := time.Now() + stats.Timestamp = ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := s.Send(&lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + ClientStats: stats, + }, + }); err != nil { + return + } + } +} + +func (lb *lbBalancer) callRemoteBalancer() (backoff bool, _ error) { + lbClient := &loadBalancerClient{cc: lb.ccRemoteLB} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) + if err != nil { + return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + } + lb.mu.Lock() + lb.remoteBalancerConnected = true + lb.mu.Unlock() + + // grpclb handshake on the stream. + initReq := &lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbpb.InitialLoadBalanceRequest{ + Name: lb.target, + }, + }, + } + if err := stream.Send(initReq); err != nil { + return true, fmt.Errorf("grpclb: failed to send init request: %v", err) + } + reply, err := stream.Recv() + if err != nil { + return true, fmt.Errorf("grpclb: failed to recv init response: %v", err) + } + initResp := reply.GetInitialResponse() + if initResp == nil { + return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response") + } + if initResp.LoadBalancerDelegate != "" { + return true, fmt.Errorf("grpclb: Delegation is not supported") + } + + go func() { + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + lb.sendLoadReport(stream, d) + } + }() + // No backoff if init req/resp handshake was successful. + return false, lb.readServerList(stream) +} + +func (lb *lbBalancer) watchRemoteBalancer() { + var retryCount int + for { + doBackoff, err := lb.callRemoteBalancer() + select { + case <-lb.doneCh: + return + default: + if err != nil { + if err == errServerTerminatedConnection { + grpclog.Info(err) + } else { + grpclog.Warning(err) + } + } + } + // Trigger a re-resolve when the stream errors. + lb.cc.cc.ResolveNow(resolver.ResolveNowOption{}) + + lb.mu.Lock() + lb.remoteBalancerConnected = false + lb.fullServerList = nil + // Enter fallback when connection to remote balancer is lost, and the + // aggregated state is not Ready. + if !lb.inFallback && lb.state != connectivity.Ready { + // Entering fallback. 
+ lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() + + if !doBackoff { + retryCount = 0 + continue + } + + timer := time.NewTimer(lb.backoff.Backoff(retryCount)) + select { + case <-timer.C: + case <-lb.doneCh: + timer.Stop() + return + } + retryCount++ + } +} + +func (lb *lbBalancer) dialRemoteLB(remoteLBName string) { + var dopts []grpc.DialOption + if creds := lb.opt.DialCreds; creds != nil { + if err := creds.OverrideServerName(remoteLBName); err == nil { + dopts = append(dopts, grpc.WithTransportCredentials(creds)) + } else { + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err) + dopts = append(dopts, grpc.WithInsecure()) + } + } else if bundle := lb.grpclbClientConnCreds; bundle != nil { + dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) + } else { + dopts = append(dopts, grpc.WithInsecure()) + } + if lb.opt.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) + } + // Explicitly set pickfirst as the balancer. + dopts = append(dopts, grpc.WithBalancerName(grpc.PickFirstBalancerName)) + wrb := internal.WithResolverBuilder.(func(resolver.Builder) grpc.DialOption) + dopts = append(dopts, wrb(lb.manualResolver)) + if channelz.IsOn() { + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) + } + + // DialContext using manualResolver.Scheme, which is a random scheme + // generated when init grpclb. The target scheme here is not important. + // + // The grpc dial target will be used by the creds (ALTS) as the authority, + // so it has to be set to remoteLBName that comes from resolver. + cc, err := grpc.DialContext(context.Background(), remoteLBName, dopts...) + if err != nil { + grpclog.Fatalf("failed to dial: %v", err) + } + lb.ccRemoteLB = cc + go lb.watchRemoteBalancer() +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go new file mode 100644 index 0000000000..2663c37e3f --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -0,0 +1,209 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +// The parent ClientConn should re-resolve when grpclb loses connection to the +// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, +// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's +// ResolveNow, and eventually results in re-resolve happening in parent +// ClientConn's resolver (DNS for example). 
+// +// parent +// ClientConn +// +-----------------------------------------------------------------+ +// | parent +---------------------------------+ | +// | DNS ClientConn | grpclb | | +// | resolver balancerWrapper | | | +// | + + | grpclb grpclb | | +// | | | | ManualResolver ClientConn | | +// | | | | + + | | +// | | | | | | Transient | | +// | | | | | | Failure | | +// | | | | | <--------- | | | +// | | | <--------------- | ResolveNow | | | +// | | <--------- | ResolveNow | | | | | +// | | ResolveNow | | | | | | +// | | | | | | | | +// | + + | + + | | +// | +---------------------------------+ | +// +-----------------------------------------------------------------+ + +// lbManualResolver is used by the ClientConn inside grpclb. It's a manual +// resolver with a special ResolveNow() function. +// +// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, +// so when grpclb client lose contact with remote balancers, the parent +// ClientConn's resolver will re-resolve. +type lbManualResolver struct { + scheme string + ccr resolver.ClientConn + + ccb balancer.ClientConn +} + +func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) { + r.ccr = cc + return r, nil +} + +func (r *lbManualResolver) Scheme() string { + return r.scheme +} + +// ResolveNow calls resolveNow on the parent ClientConn. +func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) { + r.ccb.ResolveNow(o) +} + +// Close is a noop for Resolver. +func (*lbManualResolver) Close() {} + +// UpdateState calls cc.UpdateState. +func (r *lbManualResolver) UpdateState(s resolver.State) { + r.ccr.UpdateState(s) +} + +const subConnCacheTime = time.Second * 10 + +// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. +// SubConns will be kept in cache for subConnCacheTime before being removed. +// +// Its new and remove methods are updated to do cache first. +type lbCacheClientConn struct { + cc balancer.ClientConn + timeout time.Duration + + mu sync.Mutex + // subConnCache only keeps subConns that are being deleted. + subConnCache map[resolver.Address]*subConnCacheEntry + subConnToAddr map[balancer.SubConn]resolver.Address +} + +type subConnCacheEntry struct { + sc balancer.SubConn + + cancel func() + abortDeleting bool +} + +func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { + return &lbCacheClientConn{ + cc: cc, + timeout: subConnCacheTime, + subConnCache: make(map[resolver.Address]*subConnCacheEntry), + subConnToAddr: make(map[balancer.SubConn]resolver.Address), + } +} + +func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) + } + addrWithoutMD := addrs[0] + addrWithoutMD.Metadata = nil + + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { + // If entry is in subConnCache, the SubConn was being deleted. + // cancel function will never be nil. 
+ entry.cancel() + delete(ccc.subConnCache, addrWithoutMD) + return entry.sc, nil + } + + scNew, err := ccc.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + + ccc.subConnToAddr[scNew] = addrWithoutMD + return scNew, nil +} + +func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + ccc.mu.Lock() + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + // This could happen if NewSubConn was called multiple times for the + // same address, and those SubConns are all removed. We remove sc + // immediately here. + delete(ccc.subConnToAddr, sc) + ccc.cc.RemoveSubConn(sc) + } + return + } + + entry := &subConnCacheEntry{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { + ccc.mu.Lock() + if entry.abortDeleting { + return + } + ccc.cc.RemoveSubConn(sc) + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + ccc.mu.Unlock() + }) + entry.cancel = func() { + if !timer.Stop() { + // If stop was not successful, the timer has fired (this can only + // happen in a race). But the deleting function is blocked on ccc.mu + // because the mutex was held by the caller of this function. + // + // Set abortDeleting to true to abort the deleting function. When + // the lock is released, the deleting function will acquire the + // lock, check the value of abortDeleting and return. + entry.abortDeleting = true + } + } +} + +func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccc.cc.UpdateBalancerState(s, p) +} + +func (ccc *lbCacheClientConn) close() { + ccc.mu.Lock() + // Only cancel all existing timers. There's no need to remove SubConns. + for _, entry := range ccc.subConnCache { + entry.cancel() + } + ccc.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/regenerate.sh b/vendor/google.golang.org/grpc/balancer/grpclb/regenerate.sh new file mode 100644 index 0000000000..b8978e11b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/regenerate.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +TMP=$(mktemp -d) + +function finish { + rm -rf "$TMP" +} +trap finish EXIT + +pushd "$TMP" +mkdir -p grpc/lb/v1 +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/lb/v1/load_balancer.proto > grpc/lb/v1/load_balancer.proto + +protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/lb/v1/*.proto +popd +rm -f grpc_lb_v1/*.pb.go +cp "$TMP"/grpc/lb/v1/*.pb.go grpc_lb_v1/ + diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go new file mode 100644 index 0000000000..72c7f0b23f --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go @@ -0,0 +1,330 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package alts implements the ALTS credential support by gRPC library, which +// encapsulates all the state needed by a client to authenticate with a server +// using ALTS and make various assertions, e.g., about the client's identity, +// role, or whether it is authorized to make a particular call. +// This package is experimental. +package alts + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "time" + + "google.golang.org/grpc/credentials" + core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/credentials/alts/internal/handshaker" + "google.golang.org/grpc/credentials/alts/internal/handshaker/service" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/grpclog" +) + +const ( + // hypervisorHandshakerServiceAddress represents the default ALTS gRPC + // handshaker service address in the hypervisor. + hypervisorHandshakerServiceAddress = "metadata.google.internal:8080" + // defaultTimeout specifies the server handshake timeout. + defaultTimeout = 30.0 * time.Second + // The following constants specify the minimum and maximum acceptable + // protocol versions. + protocolVersionMaxMajor = 2 + protocolVersionMaxMinor = 1 + protocolVersionMinMajor = 2 + protocolVersionMinMinor = 1 +) + +var ( + once sync.Once + maxRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMaxMajor, + Minor: protocolVersionMaxMinor, + } + minRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMinMajor, + Minor: protocolVersionMinMinor, + } + // ErrUntrustedPlatform is returned from ClientHandshake and + // ServerHandshake is running on a platform where the trustworthiness of + // the handshaker service is not guaranteed. + ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") +) + +// AuthInfo exposes security information from the ALTS handshake to the +// application. This interface is to be implemented by ALTS. Users should not +// need a brand new implementation of this interface. For situations like +// testing, any new implementation should embed this interface. This allows +// ALTS to add new methods to this interface. +type AuthInfo interface { + // ApplicationProtocol returns application protocol negotiated for the + // ALTS connection. + ApplicationProtocol() string + // RecordProtocol returns the record protocol negotiated for the ALTS + // connection. + RecordProtocol() string + // SecurityLevel returns the security level of the created ALTS secure + // channel. + SecurityLevel() altspb.SecurityLevel + // PeerServiceAccount returns the peer service account. + PeerServiceAccount() string + // LocalServiceAccount returns the local service account. + LocalServiceAccount() string + // PeerRPCVersions returns the RPC version supported by the peer. + PeerRPCVersions() *altspb.RpcProtocolVersions +} + +// ClientOptions contains the client-side options of an ALTS channel. 
These +// options will be passed to the underlying ALTS handshaker. +type ClientOptions struct { + // TargetServiceAccounts contains a list of expected target service + // accounts. + TargetServiceAccounts []string + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultClientOptions creates a new ClientOptions object with the default +// values. +func DefaultClientOptions() *ClientOptions { + return &ClientOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// ServerOptions contains the server-side options of an ALTS channel. These +// options will be passed to the underlying ALTS handshaker. +type ServerOptions struct { + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultServerOptions creates a new ServerOptions object with the default +// values. +func DefaultServerOptions() *ServerOptions { + return &ServerOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// altsTC is the credentials required for authenticating a connection using ALTS. +// It implements credentials.TransportCredentials interface. +type altsTC struct { + info *credentials.ProtocolInfo + side core.Side + accounts []string + hsAddress string +} + +// NewClientCreds constructs a client-side ALTS TransportCredentials object. +func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials { + return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress) +} + +// NewServerCreds constructs a server-side ALTS TransportCredentials object. +func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials { + return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress) +} + +func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials { + once.Do(func() { + vmOnGCP = isRunningOnGCP() + }) + + if hsAddress == "" { + hsAddress = hypervisorHandshakerServiceAddress + } + return &altsTC{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: "alts", + SecurityVersion: "1.0", + }, + side: side, + accounts: accounts, + hsAddress: hsAddress, + } +} + +// ClientHandshake implements the client side handshake protocol. +func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + if !vmOnGCP { + return nil, nil, ErrUntrustedPlatform + } + + // Connecting to ALTS handshaker service. + hsConn, err := service.Dial(g.hsAddress) + if err != nil { + return nil, nil, err + } + // Do not close hsConn since it is shared with other handshakes. + + // Possible context leak: + // The cancel function for the child context we create will only be + // called a non-nil error is returned. 
+ var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + opts := handshaker.DefaultClientHandshakerOptions() + opts.TargetName = addr + opts.TargetServiceAccounts = g.accounts + opts.RPCVersions = &altspb.RpcProtocolVersions{ + MaxRpcVersion: maxRPCVersion, + MinRpcVersion: minRPCVersion, + } + chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + chs.Close() + } + }() + secConn, authInfo, err := chs.ClientHandshake(ctx) + if err != nil { + return nil, nil, err + } + altsAuthInfo, ok := authInfo.(AuthInfo) + if !ok { + return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo") + } + match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + if !match { + return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + } + return secConn, authInfo, nil +} + +// ServerHandshake implements the server side ALTS handshaker. +func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + if !vmOnGCP { + return nil, nil, ErrUntrustedPlatform + } + // Connecting to ALTS handshaker service. + hsConn, err := service.Dial(g.hsAddress) + if err != nil { + return nil, nil, err + } + // Do not close hsConn since it's shared with other handshakes. + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + opts := handshaker.DefaultServerHandshakerOptions() + opts.RPCVersions = &altspb.RpcProtocolVersions{ + MaxRpcVersion: maxRPCVersion, + MinRpcVersion: minRPCVersion, + } + shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + shs.Close() + } + }() + secConn, authInfo, err := shs.ServerHandshake(ctx) + if err != nil { + return nil, nil, err + } + altsAuthInfo, ok := authInfo.(AuthInfo) + if !ok { + return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo") + } + match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + if !match { + return nil, nil, fmt.Errorf("client-side RPC versions is not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + } + return secConn, authInfo, nil +} + +func (g *altsTC) Info() credentials.ProtocolInfo { + return *g.info +} + +func (g *altsTC) Clone() credentials.TransportCredentials { + info := *g.info + var accounts []string + if g.accounts != nil { + accounts = make([]string, len(g.accounts)) + copy(accounts, g.accounts) + } + return &altsTC{ + info: &info, + side: g.side, + hsAddress: g.hsAddress, + accounts: accounts, + } +} + +func (g *altsTC) OverrideServerName(serverNameOverride string) error { + g.info.ServerName = serverNameOverride + return nil +} + +// compareRPCVersion returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2. 
+func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { + switch { + case v1.GetMajor() > v2.GetMajor(), + v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor(): + return 1 + case v1.GetMajor() < v2.GetMajor(), + v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor(): + return -1 + } + return 0 +} + +// checkRPCVersions performs a version check between local and peer rpc protocol +// versions. This function returns true if the check passes which means both +// parties agreed on a common rpc protocol to use, and false otherwise. The +// function also returns the highest common RPC protocol version both parties +// agreed on. +func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { + if local == nil || peer == nil { + grpclog.Error("invalid checkRPCVersions argument, either local or peer is nil.") + return false, nil + } + + // maxCommonVersion is MIN(local.max, peer.max). + maxCommonVersion := local.GetMaxRpcVersion() + if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 { + maxCommonVersion = peer.GetMaxRpcVersion() + } + + // minCommonVersion is MAX(local.min, peer.min). + minCommonVersion := peer.GetMinRpcVersion() + if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 { + minCommonVersion = local.GetMinRpcVersion() + } + + if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 { + return false, nil + } + return true, maxCommonVersion +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go new file mode 100644 index 0000000000..ed628dc7cd --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package authinfo provide authentication information returned by handshakers. +package authinfo + +import ( + "google.golang.org/grpc/credentials" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" +) + +var _ credentials.AuthInfo = (*altsAuthInfo)(nil) + +// altsAuthInfo exposes security information from the ALTS handshake to the +// application. altsAuthInfo is immutable and implements credentials.AuthInfo. +type altsAuthInfo struct { + p *altspb.AltsContext +} + +// New returns a new altsAuthInfo object given handshaker results. +func New(result *altspb.HandshakerResult) credentials.AuthInfo { + return newAuthInfo(result) +} + +func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { + return &altsAuthInfo{ + p: &altspb.AltsContext{ + ApplicationProtocol: result.GetApplicationProtocol(), + RecordProtocol: result.GetRecordProtocol(), + // TODO: assign security level from result. 
+ SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, + PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), + LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), + PeerRpcVersions: result.GetPeerRpcVersions(), + }, + } +} + +// AuthType identifies the context as providing ALTS authentication information. +func (s *altsAuthInfo) AuthType() string { + return "alts" +} + +// ApplicationProtocol returns the context's application protocol. +func (s *altsAuthInfo) ApplicationProtocol() string { + return s.p.GetApplicationProtocol() +} + +// RecordProtocol returns the context's record protocol. +func (s *altsAuthInfo) RecordProtocol() string { + return s.p.GetRecordProtocol() +} + +// SecurityLevel returns the context's security level. +func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel { + return s.p.GetSecurityLevel() +} + +// PeerServiceAccount returns the context's peer service account. +func (s *altsAuthInfo) PeerServiceAccount() string { + return s.p.GetPeerServiceAccount() +} + +// LocalServiceAccount returns the context's local service account. +func (s *altsAuthInfo) LocalServiceAccount() string { + return s.p.GetLocalServiceAccount() +} + +// PeerRPCVersions returns the context's peer RPC versions. +func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { + return s.p.GetPeerRpcVersions() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go new file mode 100644 index 0000000000..33fba81239 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package internal contains common core functionality for ALTS. +package internal + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +const ( + // ClientSide identifies the client in this communication. + ClientSide Side = iota + // ServerSide identifies the server in this communication. + ServerSide +) + +// PeerNotRespondingError is returned when a peer server is not responding +// after a channel has been established. It is treated as a temporary connection +// error and re-connection to the server should be attempted. +var PeerNotRespondingError = &peerNotRespondingError{} + +// Side identifies the party's role: client or server. +type Side int + +type peerNotRespondingError struct{} + +// Return an error message for the purpose of logging. +func (e *peerNotRespondingError) Error() string { + return "peer server is not responding and re-connection should be attempted." +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e *peerNotRespondingError) Temporary() bool { + return true +} + +// Handshaker defines a ALTS handshaker interface. 
+type Handshaker interface { + // ClientHandshake starts and completes a client-side handshaking and + // returns a secure connection and corresponding auth information. + ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // ServerHandshake starts and completes a server-side handshaking and + // returns a secure connection and corresponding auth information. + ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // Close terminates the Handshaker. It should be called when the caller + // obtains the secure connection. + Close() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go new file mode 100644 index 0000000000..43726e877b --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "encoding/binary" + "fmt" + "strconv" +) + +// rekeyAEAD holds the necessary information for an AEAD based on +// AES-GCM that performs nonce-based key derivation and XORs the +// nonce with a random mask. +type rekeyAEAD struct { + kdfKey []byte + kdfCounter []byte + nonceMask []byte + nonceBuf []byte + gcmAEAD cipher.AEAD +} + +// KeySizeError signals that the given key does not have the correct size. +type KeySizeError int + +func (k KeySizeError) Error() string { + return "alts/conn: invalid key size " + strconv.Itoa(int(k)) +} + +// newRekeyAEAD creates a new instance of aes128gcm with rekeying. +// The key argument should be 44 bytes, the first 32 bytes are used as a key +// for HKDF-expand and the remainining 12 bytes are used as a random mask for +// the counter. +func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { + k := len(key) + if k != kdfKeyLen+nonceLen { + return nil, KeySizeError(k) + } + return &rekeyAEAD{ + kdfKey: key[:kdfKeyLen], + kdfCounter: make([]byte, kdfCounterLen), + nonceMask: key[kdfKeyLen:], + nonceBuf: make([]byte, nonceLen), + gcmAEAD: nil, + }, nil +} + +// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce, +// and calls Seal for aes128gcm. +func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if err := s.rekeyIfRequired(nonce); err != nil { + panic(fmt.Sprintf("Rekeying failed with: %s", err.Error())) + } + maskNonce(s.nonceBuf, nonce, s.nonceMask) + return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData) +} + +// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce, +// and calls Open for aes128gcm. 
+func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if err := s.rekeyIfRequired(nonce); err != nil { + return nil, err + } + maskNonce(s.nonceBuf, nonce, s.nonceMask) + return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData) +} + +// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil +// or cannot be used with given nonce. +func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error { + newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen] + if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) { + return nil + } + copy(s.kdfCounter, newKdfCounter) + a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter)) + if err != nil { + return err + } + s.gcmAEAD, err = cipher.NewGCM(a) + return err +} + +// maskNonce XORs the given nonce with the mask and stores the result in dst. +func maskNonce(dst, nonce, mask []byte) { + nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64]) + nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:]) + mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64]) + mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:]) + binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1) + binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2) +} + +// NonceSize returns the required nonce size. +func (s *rekeyAEAD) NonceSize() int { + return s.gcmAEAD.NonceSize() +} + +// Overhead returns the ciphertext overhead. +func (s *rekeyAEAD) Overhead() int { + return s.gcmAEAD.Overhead() +} + +// hkdfExpand computes the first 16 bytes of the HKDF-expand function +// defined in RFC5869. +func hkdfExpand(key, info []byte) []byte { + mac := hmac.New(sha256.New, key) + mac.Write(info) + mac.Write([]byte{0x01}[:]) + return mac.Sum(nil)[:aeadKeyLen] +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go new file mode 100644 index 0000000000..04e0adb6c9 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/aes" + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCM = 5 +) + +// aes128gcm is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. +type aes128gcm struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + aead cipher.AEAD +} + +// NewAES128GCM creates an instance that uses aes128gcm for ALTS record. 
+func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aes128gcm{ + inCounter: NewInCounter(side, overflowLenAES128GCM), + outCounter: NewOutCounter(side, overflowLenAES128GCM), + aead: a, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.aead.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcm) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := s.aead.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go new file mode 100644 index 0000000000..6a9035ea25 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -0,0 +1,116 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCMRekey = 8 + nonceLen = 12 + aeadKeyLen = 16 + kdfKeyLen = 32 + kdfCounterOffset = 2 + kdfCounterLen = 6 + sizeUint64 = 8 +) + +// aes128gcmRekey is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. 
+type aes128gcmRekey struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + inAEAD cipher.AEAD + outAEAD cipher.AEAD +} + +// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying +// for ALTS record. The key argument should be 44 bytes, the first 32 bytes +// are used as a key for HKDF-expand and the remainining 12 bytes are used +// as a random mask for the counter. +func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { + inCounter := NewInCounter(side, overflowLenAES128GCMRekey) + outCounter := NewOutCounter(side, overflowLenAES128GCMRekey) + inAEAD, err := newRekeyAEAD(key) + if err != nil { + return nil, err + } + outAEAD, err := newRekeyAEAD(key) + if err != nil { + return nil, err + } + return &aes128gcmRekey{ + inCounter, + outCounter, + inAEAD, + outAEAD, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcmRekey) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go new file mode 100644 index 0000000000..1795d0c9e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package conn + +import ( + "encoding/binary" + "errors" + "fmt" +) + +const ( + // GcmTagSize is the GCM tag size is the difference in length between + // plaintext and ciphertext. From crypto/cipher/gcm.go in Go crypto + // library. + GcmTagSize = 16 +) + +// ErrAuth occurs on authentication failure. +var ErrAuth = errors.New("message authentication failed") + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return head, tail +} + +// ParseFramedMsg parse the provided buffer and returns a frame of the format +// msgLength+msg and any remaining bytes in that buffer. +func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) { + // If the size field is not complete, return the provided buffer as + // remaining buffer. + if len(b) < MsgLenFieldSize { + return nil, b, nil + } + msgLenField := b[:MsgLenFieldSize] + length := binary.LittleEndian.Uint32(msgLenField) + if length > maxLen { + return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen) + } + if len(b) < int(length)+4 { // account for the first 4 msg length bytes. + // Frame is not complete yet. + return nil, b, nil + } + return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go new file mode 100644 index 0000000000..9f00aca0b6 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "errors" +) + +const counterLen = 12 + +var ( + errInvalidCounter = errors.New("invalid counter") +) + +// Counter is a 96-bit, little-endian counter. +type Counter struct { + value [counterLen]byte + invalid bool + overflowLen int +} + +// Value returns the current value of the counter as a byte slice. +func (c *Counter) Value() ([]byte, error) { + if c.invalid { + return nil, errInvalidCounter + } + return c.value[:], nil +} + +// Inc increments the counter and checks for overflow. +func (c *Counter) Inc() { + // If the counter is already invalid, there is no need to increase it. 
+ if c.invalid { + return + } + i := 0 + for ; i < c.overflowLen; i++ { + c.value[i]++ + if c.value[i] != 0 { + break + } + } + if i == c.overflowLen { + c.invalid = true + } +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go new file mode 100644 index 0000000000..fd5a53d9a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -0,0 +1,271 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package conn contains an implementation of a secure channel created by gRPC +// handshakers. +package conn + +import ( + "encoding/binary" + "fmt" + "math" + "net" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +// ALTSRecordCrypto is the interface for gRPC ALTS record protocol. +type ALTSRecordCrypto interface { + // Encrypt encrypts the plaintext and computes the tag (if any) of dst + // and plaintext, dst and plaintext do not overlap. + Encrypt(dst, plaintext []byte) ([]byte, error) + // EncryptionOverhead returns the tag size (if any) in bytes. + EncryptionOverhead() int + // Decrypt decrypts ciphertext and verify the tag (if any). dst and + // ciphertext may alias exactly or not at all. To reuse ciphertext's + // storage for the decrypted output, use ciphertext[:0] as dst. + Decrypt(dst, ciphertext []byte) ([]byte, error) +} + +// ALTSRecordFunc is a function type for factory functions that create +// ALTSRecordCrypto instances. +type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) + +const ( + // MsgLenFieldSize is the byte size of the frame length field of a + // framed message. + MsgLenFieldSize = 4 + // The byte size of the message type field of a framed message. + msgTypeFieldSize = 4 + // The bytes size limit for a ALTS record message. + altsRecordLengthLimit = 1024 * 1024 // 1 MiB + // The default bytes size of a ALTS record message. + altsRecordDefaultLength = 4 * 1024 // 4KiB + // Message type value included in ALTS record framing. + altsRecordMsgType = uint32(0x06) + // The initial write buffer size. + altsWriteBufferInitialSize = 32 * 1024 // 32KiB + // The maximum write buffer size. This *must* be multiple of + // altsRecordDefaultLength. + altsWriteBufferMaxSize = 512 * 1024 // 512KiB +) + +var ( + protocols = make(map[string]ALTSRecordFunc) +) + +// RegisterProtocol register a ALTS record encryption protocol. +func RegisterProtocol(protocol string, f ALTSRecordFunc) error { + if _, ok := protocols[protocol]; ok { + return fmt.Errorf("protocol %v is already registered", protocol) + } + protocols[protocol] = f + return nil +} + +// conn represents a secured connection. It implements the net.Conn interface. +type conn struct { + net.Conn + crypto ALTSRecordCrypto + // buf holds data that has been read from the connection and decrypted, + // but has not yet been returned by Read. 
+ buf []byte + payloadLengthLimit int + // protected holds data read from the network but have not yet been + // decrypted. This data might not compose a complete frame. + protected []byte + // writeBuf is a buffer used to contain encrypted frames before being + // written to the network. + writeBuf []byte + // nextFrame stores the next frame (in protected buffer) info. + nextFrame []byte + // overhead is the calculated overhead of each frame. + overhead int +} + +// NewConn creates a new secure channel instance given the other party role and +// handshaking result. +func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) { + newCrypto := protocols[recordProtocol] + if newCrypto == nil { + return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol) + } + crypto, err := newCrypto(side, key) + if err != nil { + return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err) + } + overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead() + payloadLengthLimit := altsRecordDefaultLength - overhead + if protected == nil { + // We pre-allocate protected to be of size + // 2*altsRecordDefaultLength-1 during initialization. We only + // read from the network into protected when protected does not + // contain a complete frame, which is at most + // altsRecordDefaultLength-1 (bytes). And we read at most + // altsRecordDefaultLength (bytes) data into protected at one + // time. Therefore, 2*altsRecordDefaultLength-1 is large enough + // to buffer data read from the network. + protected = make([]byte, 0, 2*altsRecordDefaultLength-1) + } + + altsConn := &conn{ + Conn: c, + crypto: crypto, + payloadLengthLimit: payloadLengthLimit, + protected: protected, + writeBuf: make([]byte, altsWriteBufferInitialSize), + nextFrame: protected, + overhead: overhead, + } + return altsConn, nil +} + +// Read reads and decrypts a frame from the underlying connection, and copies the +// decrypted payload into b. If the size of the payload is greater than len(b), +// Read retains the remaining bytes in an internal buffer, and subsequent calls +// to Read will read from this buffer until it is exhausted. +func (p *conn) Read(b []byte) (n int, err error) { + if len(p.buf) == 0 { + var framedMsg []byte + framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit) + if err != nil { + return n, err + } + // Check whether the next frame to be decrypted has been + // completely received yet. + if len(framedMsg) == 0 { + copy(p.protected, p.nextFrame) + p.protected = p.protected[:len(p.nextFrame)] + // Always copy next incomplete frame to the beginning of + // the protected buffer and reset nextFrame to it. + p.nextFrame = p.protected + } + // Check whether a complete frame has been received yet. + for len(framedMsg) == 0 { + if len(p.protected) == cap(p.protected) { + tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength) + copy(tmp, p.protected) + p.protected = tmp + } + n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)]) + if err != nil { + return 0, err + } + p.protected = p.protected[:len(p.protected)+n] + framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit) + if err != nil { + return 0, err + } + } + // Now we have a complete frame, decrypted it. 
+ msg := framedMsg[MsgLenFieldSize:] + msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) + if msgType&0xff != altsRecordMsgType { + return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", + msgType, altsRecordMsgType) + } + ciphertext := msg[msgTypeFieldSize:] + + // Decrypt requires that if the dst and ciphertext alias, they + // must alias exactly. Code here used to use msg[:0], but msg + // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than + // ciphertext, so they alias inexactly. Using ciphertext[:0] + // arranges the appropriate aliasing without needing to copy + // ciphertext or use a separate destination buffer. For more info + // check: https://golang.org/pkg/crypto/cipher/#AEAD. + p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) + if err != nil { + return 0, err + } + } + + n = copy(b, p.buf) + p.buf = p.buf[n:] + return n, nil +} + +// Write encrypts, frames, and writes bytes from b to the underlying connection. +func (p *conn) Write(b []byte) (n int, err error) { + n = len(b) + // Calculate the output buffer size with framing and encryption overhead. + numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) + size := len(b) + numOfFrames*p.overhead + // If writeBuf is too small, increase its size up to the maximum size. + partialBSize := len(b) + if size > altsWriteBufferMaxSize { + size = altsWriteBufferMaxSize + const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength + partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit + } + if len(p.writeBuf) < size { + p.writeBuf = make([]byte, size) + } + + for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { + partialBEnd := partialBStart + partialBSize + if partialBEnd > len(b) { + partialBEnd = len(b) + } + partialB := b[partialBStart:partialBEnd] + writeBufIndex := 0 + for len(partialB) > 0 { + payloadLen := len(partialB) + if payloadLen > p.payloadLengthLimit { + payloadLen = p.payloadLengthLimit + } + buf := partialB[:payloadLen] + partialB = partialB[payloadLen:] + + // Write buffer contains: length, type, payload, and tag + // if any. + + // 1. Fill in type field. + msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] + binary.LittleEndian.PutUint32(msg, altsRecordMsgType) + + // 2. Encrypt the payload and create a tag if any. + msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) + if err != nil { + return n, err + } + + // 3. Fill in the size field. + binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) + + // 4. Increase writeBufIndex. + writeBufIndex += len(buf) + p.overhead + } + nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) + if err != nil { + // We need to calculate the actual data size that was + // written. This means we need to remove header, + // encryption overheads, and any partially-written + // frame data. + numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) + return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err + } + } + return n, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go new file mode 100644 index 0000000000..84821fa254 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import core "google.golang.org/grpc/credentials/alts/internal" + +// NewOutCounter returns an outgoing counter initialized to the starting sequence +// number for the client/server side of a connection. +func NewOutCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ServerSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// NewInCounter returns an incoming counter initialized to the starting sequence +// number for the client/server side of a connection. This is used in ALTS record +// to check that incoming counters are as expected, since ALTS record guarantees +// that messages are unwrapped in the same order that the peer wrapped them. +func NewInCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ClientSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// CounterFromValue creates a new counter given an initial value. +func CounterFromValue(value []byte, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + copy(c.value[:], value) + return +} + +// CounterSide returns the connection side (client/server) a sequence counter is +// associated with. +func CounterSide(c []byte) core.Side { + if c[counterLen-1]&0x80 == 0x80 { + return core.ServerSide + } + return core.ClientSide +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go new file mode 100644 index 0000000000..49c22c1e89 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -0,0 +1,365 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package handshaker provides ALTS handshaking functionality for GCP. 
+package handshaker + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + + grpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/credentials/alts/internal/authinfo" + "google.golang.org/grpc/credentials/alts/internal/conn" + altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" +) + +const ( + // The maximum byte size of receive frames. + frameLimit = 64 * 1024 // 64 KB + rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" + // maxPendingHandshakes represents the maximum number of concurrent + // handshakes. + maxPendingHandshakes = 100 +) + +var ( + hsProtocol = altspb.HandshakeProtocol_ALTS + appProtocols = []string{"grpc"} + recordProtocols = []string{rekeyRecordProtocolName} + keyLength = map[string]int{ + rekeyRecordProtocolName: 44, + } + altsRecordFuncs = map[string]conn.ALTSRecordFunc{ + // ALTS handshaker protocols. + rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) { + return conn.NewAES128GCMRekey(s, keyData) + }, + } + // control number of concurrent created (but not closed) handshakers. + mu sync.Mutex + concurrentHandshakes = int64(0) + // errDropped occurs when maxPendingHandshakes is reached. + errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") +) + +func init() { + for protocol, f := range altsRecordFuncs { + if err := conn.RegisterProtocol(protocol, f); err != nil { + panic(err) + } + } +} + +func acquire(n int64) bool { + mu.Lock() + success := maxPendingHandshakes-concurrentHandshakes >= n + if success { + concurrentHandshakes += n + } + mu.Unlock() + return success +} + +func release(n int64) { + mu.Lock() + concurrentHandshakes -= n + if concurrentHandshakes < 0 { + mu.Unlock() + panic("bad release") + } + mu.Unlock() +} + +// ClientHandshakerOptions contains the client handshaker options that can +// provided by the caller. +type ClientHandshakerOptions struct { + // ClientIdentity is the handshaker client local identity. + ClientIdentity *altspb.Identity + // TargetName is the server service account name for secure name + // checking. + TargetName string + // TargetServiceAccounts contains a list of expected target service + // accounts. One of these accounts should match one of the accounts in + // the handshaker results. Otherwise, the handshake fails. + TargetServiceAccounts []string + // RPCVersions specifies the gRPC versions accepted by the client. + RPCVersions *altspb.RpcProtocolVersions +} + +// ServerHandshakerOptions contains the server handshaker options that can +// provided by the caller. +type ServerHandshakerOptions struct { + // RPCVersions specifies the gRPC versions accepted by the server. + RPCVersions *altspb.RpcProtocolVersions +} + +// DefaultClientHandshakerOptions returns the default client handshaker options. +func DefaultClientHandshakerOptions() *ClientHandshakerOptions { + return &ClientHandshakerOptions{} +} + +// DefaultServerHandshakerOptions returns the default client handshaker options. +func DefaultServerHandshakerOptions() *ServerHandshakerOptions { + return &ServerHandshakerOptions{} +} + +// TODO: add support for future local and remote endpoint in both client options +// and server options (server options struct does not exist now. When +// caller can provide endpoints, it should be created. 
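
For orientation, the client-side pieces being vendored in this file fit together roughly like this: the caller first dials the ALTS handshaker service (see service.Dial later in this diff), opens a raw connection to the peer, creates a client handshaker over both, and then runs ClientHandshake to obtain the secured net.Conn plus its AuthInfo. The sketch below is illustrative only and uses just the functions whose signatures appear in this diff (service.Dial, DefaultClientHandshakerOptions, NewClientHandshaker, ClientHandshake); the handshaker-service address, peer address, and service account are placeholder assumptions, and since these packages are internal to the grpc module, real applications would go through the public credentials/alts package instead.

// Illustrative sketch (not part of the vendored files): wires together the
// client-side functions introduced in this diff. Addresses and the service
// account below are placeholders, not values taken from the source.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc/credentials/alts/internal/handshaker"
	"google.golang.org/grpc/credentials/alts/internal/handshaker/service"
)

func main() {
	ctx := context.Background()

	// Connect to the ALTS handshaker service (placeholder address).
	hsConn, err := service.Dial("metadata.google.internal:8080")
	if err != nil {
		log.Fatalf("dialing handshaker service: %v", err)
	}

	// Open the raw connection to the peer that will be secured.
	rawConn, err := net.Dial("tcp", "server.example.com:443")
	if err != nil {
		log.Fatalf("dialing peer: %v", err)
	}

	// Configure and run the client-side ALTS handshake.
	opts := handshaker.DefaultClientHandshakerOptions()
	opts.TargetServiceAccounts = []string{"server@example.iam.gserviceaccount.com"} // assumed account
	hs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts)
	if err != nil {
		log.Fatalf("creating handshaker: %v", err)
	}
	defer hs.Close()

	secureConn, authInfo, err := hs.ClientHandshake(ctx)
	if err != nil {
		log.Fatalf("ALTS handshake: %v", err)
	}
	log.Printf("handshake done, auth type %q", authInfo.AuthType())
	_ = secureConn // the secured net.Conn carries application traffic from here on
}
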
+ +// altsHandshaker is used to complete a ALTS handshaking between client and +// server. This handshaker talks to the ALTS handshaker service in the metadata +// server. +type altsHandshaker struct { + // RPC stream used to access the ALTS Handshaker service. + stream altsgrpc.HandshakerService_DoHandshakeClient + // the connection to the peer. + conn net.Conn + // client handshake options. + clientOpts *ClientHandshakerOptions + // server handshake options. + serverOpts *ServerHandshakerOptions + // defines the side doing the handshake, client or server. + side core.Side +} + +// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC +// stub created using the passed conn and used to talk to the ALTS Handshaker +// service in the metadata server. +func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + return &altsHandshaker{ + stream: stream, + conn: c, + clientOpts: opts, + side: core.ClientSide, + }, nil +} + +// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC +// stub created using the passed conn and used to talk to the ALTS Handshaker +// service in the metadata server. +func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + return &altsHandshaker{ + stream: stream, + conn: c, + serverOpts: opts, + side: core.ServerSide, + }, nil +} + +// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once +// done, ClientHandshake returns a secure connection. +func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { + if !acquire(1) { + return nil, nil, errDropped + } + defer release(1) + + if h.side != core.ClientSide { + return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") + } + + // Create target identities from service account list. + targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) + for _, account := range h.clientOpts.TargetServiceAccounts { + targetIdentities = append(targetIdentities, &altspb.Identity{ + IdentityOneof: &altspb.Identity_ServiceAccount{ + ServiceAccount: account, + }, + }) + } + req := &altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_ClientStart{ + ClientStart: &altspb.StartClientHandshakeReq{ + HandshakeSecurityProtocol: hsProtocol, + ApplicationProtocols: appProtocols, + RecordProtocols: recordProtocols, + TargetIdentities: targetIdentities, + LocalIdentity: h.clientOpts.ClientIdentity, + TargetName: h.clientOpts.TargetName, + RpcVersions: h.clientOpts.RPCVersions, + }, + }, + } + + conn, result, err := h.doHandshake(req) + if err != nil { + return nil, nil, err + } + authInfo := authinfo.New(result) + return conn, authInfo, nil +} + +// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once +// done, ServerHandshake returns a secure connection. 
+func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { + if !acquire(1) { + return nil, nil, errDropped + } + defer release(1) + + if h.side != core.ServerSide { + return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") + } + + p := make([]byte, frameLimit) + n, err := h.conn.Read(p) + if err != nil { + return nil, nil, err + } + + // Prepare server parameters. + // TODO: currently only ALTS parameters are provided. Might need to use + // more options in the future. + params := make(map[int32]*altspb.ServerHandshakeParameters) + params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ + RecordProtocols: recordProtocols, + } + req := &altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_ServerStart{ + ServerStart: &altspb.StartServerHandshakeReq{ + ApplicationProtocols: appProtocols, + HandshakeParameters: params, + InBytes: p[:n], + RpcVersions: h.serverOpts.RPCVersions, + }, + }, + } + + conn, result, err := h.doHandshake(req) + if err != nil { + return nil, nil, err + } + authInfo := authinfo.New(result) + return conn, authInfo, nil +} + +func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) { + resp, err := h.accessHandshakerService(req) + if err != nil { + return nil, nil, err + } + // Check of the returned status is an error. + if resp.GetStatus() != nil { + if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { + return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) + } + } + + var extra []byte + if req.GetServerStart() != nil { + extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] + } + result, extra, err := h.processUntilDone(resp, extra) + if err != nil { + return nil, nil, err + } + // The handshaker returns a 128 bytes key. It should be truncated based + // on the returned record protocol. + keyLen, ok := keyLength[result.RecordProtocol] + if !ok { + return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol) + } + sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra) + if err != nil { + return nil, nil, err + } + return sc, result, nil +} + +func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) { + if err := h.stream.Send(req); err != nil { + return nil, err + } + resp, err := h.stream.Recv() + if err != nil { + return nil, err + } + return resp, nil +} + +// processUntilDone processes the handshake until the handshaker service returns +// the results. Handshaker service takes care of frame parsing, so we read +// whatever received from the network and send it to the handshaker service. +func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) { + for { + if len(resp.OutFrames) > 0 { + if _, err := h.conn.Write(resp.OutFrames); err != nil { + return nil, nil, err + } + } + if resp.Result != nil { + return resp.Result, extra, nil + } + buf := make([]byte, frameLimit) + n, err := h.conn.Read(buf) + if err != nil && err != io.EOF { + return nil, nil, err + } + // If there is nothing to send to the handshaker service, and + // nothing is received from the peer, then we are stuck. + // This covers the case when the peer is not responding. Note + // that handshaker service connection issues are caught in + // accessHandshakerService before we even get here. 
+ if len(resp.OutFrames) == 0 && n == 0 { + return nil, nil, core.PeerNotRespondingError + } + // Append extra bytes from the previous interaction with the + // handshaker service with the current buffer read from conn. + p := append(extra, buf[:n]...) + resp, err = h.accessHandshakerService(&altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_Next{ + Next: &altspb.NextHandshakeMessageReq{ + InBytes: p, + }, + }, + }) + if err != nil { + return nil, nil, err + } + // Set extra based on handshaker service response. + if n == 0 { + extra = nil + } else { + extra = buf[resp.GetBytesConsumed():n] + } + } +} + +// Close terminates the Handshaker. It should be called when the caller obtains +// the secure connection. +func (h *altsHandshaker) Close() { + h.stream.CloseSend() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go new file mode 100644 index 0000000000..0c7b568354 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -0,0 +1,54 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package service manages connections between the VM application and the ALTS +// handshaker service. +package service + +import ( + "sync" + + grpc "google.golang.org/grpc" +) + +var ( + // hsConn represents a connection to hypervisor handshaker service. + hsConn *grpc.ClientConn + mu sync.Mutex + // hsDialer will be reassigned in tests. + hsDialer = grpc.Dial +) + +// Dial dials the handshake service in the hypervisor. If a connection has +// already been established, this function returns it. Otherwise, a new +// connection is created. +func Dial(hsAddress string) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + if hsConn == nil { + // Create a new connection to the handshaker service. Note that + // this connection stays open until the application is closed. + var err error + hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + if err != nil { + return nil, err + } + } + return hsConn, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go new file mode 100644 index 0000000000..d1793073de --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -0,0 +1,151 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/altscontext.proto + +package grpc_gcp // import "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AltsContext struct { + // The application protocol negotiated for this connection. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // The security level of the created secure channel. + SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` + // The peer service account. + PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` + // The local service account. + LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // Additional attributes of the peer. + PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AltsContext) Reset() { *m = AltsContext{} } +func (m *AltsContext) String() string { return proto.CompactTextString(m) } +func (*AltsContext) ProtoMessage() {} +func (*AltsContext) Descriptor() ([]byte, []int) { + return fileDescriptor_altscontext_f6b7868f9a30497f, []int{0} +} +func (m *AltsContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AltsContext.Unmarshal(m, b) +} +func (m *AltsContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AltsContext.Marshal(b, m, deterministic) +} +func (dst *AltsContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_AltsContext.Merge(dst, src) +} +func (m *AltsContext) XXX_Size() int { + return xxx_messageInfo_AltsContext.Size(m) +} +func (m *AltsContext) XXX_DiscardUnknown() { + xxx_messageInfo_AltsContext.DiscardUnknown(m) +} + +var xxx_messageInfo_AltsContext proto.InternalMessageInfo + +func (m *AltsContext) GetApplicationProtocol() string { + if m != nil { + return m.ApplicationProtocol + } + return "" +} + +func (m *AltsContext) GetRecordProtocol() string { + if m != nil { + return m.RecordProtocol + } + return "" +} + +func (m *AltsContext) GetSecurityLevel() SecurityLevel { + if m != nil { + return m.SecurityLevel + } + return SecurityLevel_SECURITY_NONE +} + +func (m *AltsContext) GetPeerServiceAccount() string { + if m != nil { + return m.PeerServiceAccount + } + return "" +} + +func (m *AltsContext) GetLocalServiceAccount() string { + if m != nil { + return m.LocalServiceAccount + } + return "" +} + +func (m *AltsContext) 
GetPeerRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.PeerRpcVersions + } + return nil +} + +func (m *AltsContext) GetPeerAttributes() map[string]string { + if m != nil { + return m.PeerAttributes + } + return nil +} + +func init() { + proto.RegisterType((*AltsContext)(nil), "grpc.gcp.AltsContext") + proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.AltsContext.PeerAttributesEntry") +} + +func init() { + proto.RegisterFile("grpc/gcp/altscontext.proto", fileDescriptor_altscontext_f6b7868f9a30497f) +} + +var fileDescriptor_altscontext_f6b7868f9a30497f = []byte{ + // 411 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4d, 0x6f, 0x13, 0x31, + 0x10, 0x86, 0xb5, 0x0d, 0x2d, 0xe0, 0x88, 0xb4, 0xb8, 0xa9, 0x58, 0x45, 0x42, 0x8a, 0xb8, 0xb0, + 0x5c, 0x76, 0x21, 0x5c, 0x10, 0x07, 0x50, 0x8a, 0x38, 0x20, 0x71, 0x88, 0xb6, 0x12, 0x07, 0x2e, + 0x2b, 0x77, 0x3a, 0xb2, 0x2c, 0x5c, 0x8f, 0x35, 0x76, 0x22, 0xf2, 0xb3, 0xf9, 0x07, 0x68, 0xed, + 0xcd, 0x07, 0x1f, 0xb7, 0x9d, 0x79, 0x9f, 0x19, 0xbf, 0xb3, 0x33, 0x62, 0xa6, 0xd9, 0x43, 0xa3, + 0xc1, 0x37, 0xca, 0xc6, 0x00, 0xe4, 0x22, 0xfe, 0x8c, 0xb5, 0x67, 0x8a, 0x24, 0x1f, 0xf5, 0x5a, + 0xad, 0xc1, 0xcf, 0xaa, 0x3d, 0x15, 0x59, 0xb9, 0xe0, 0x89, 0x63, 0x17, 0x10, 0xd6, 0x6c, 0xe2, + 0xb6, 0x03, 0xba, 0xbf, 0x27, 0x97, 0x6b, 0x5e, 0xfc, 0x1a, 0x89, 0xf1, 0xd2, 0xc6, 0xf0, 0x29, + 0x77, 0x92, 0x6f, 0xc4, 0x54, 0x79, 0x6f, 0x0d, 0xa8, 0x68, 0xc8, 0x75, 0x09, 0x02, 0xb2, 0x65, + 0x31, 0x2f, 0xaa, 0xc7, 0xed, 0xe5, 0x91, 0xb6, 0x1a, 0x24, 0xf9, 0x52, 0x9c, 0x33, 0x02, 0xf1, + 0xdd, 0x81, 0x3e, 0x49, 0xf4, 0x24, 0xa7, 0xf7, 0xe0, 0x07, 0x31, 0xd9, 0x9b, 0xb0, 0xb8, 0x41, + 0x5b, 0x8e, 0xe6, 0x45, 0x35, 0x59, 0x3c, 0xab, 0x77, 0xc6, 0xeb, 0x9b, 0x41, 0xff, 0xda, 0xcb, + 0xed, 0x93, 0x70, 0x1c, 0xca, 0xd7, 0x62, 0xea, 0x11, 0xb9, 0x0b, 0xc8, 0x1b, 0x03, 0xd8, 0x29, + 0x00, 0x5a, 0xbb, 0x58, 0x3e, 0x48, 0xaf, 0xc9, 0x5e, 0xbb, 0xc9, 0xd2, 0x32, 0x2b, 0x72, 0x21, + 0xae, 0x2c, 0x81, 0xb2, 0xff, 0x94, 0x9c, 0xe6, 0x71, 0x92, 0xf8, 0x57, 0xcd, 0x17, 0xf1, 0x34, + 0xbd, 0xc2, 0x1e, 0xba, 0x0d, 0x72, 0x30, 0xe4, 0x42, 0x79, 0x36, 0x2f, 0xaa, 0xf1, 0xe2, 0xf9, + 0xc1, 0x68, 0xeb, 0x61, 0x37, 0xd7, 0xb7, 0x01, 0x6a, 0xcf, 0xfb, 0xba, 0xd6, 0xc3, 0x2e, 0x21, + 0x5b, 0x91, 0x52, 0x9d, 0x8a, 0x91, 0xcd, 0xed, 0x3a, 0x62, 0x28, 0x1f, 0xce, 0x47, 0xd5, 0x78, + 0xf1, 0xea, 0xd0, 0xe8, 0xe8, 0xe7, 0xd7, 0x2b, 0x44, 0x5e, 0xee, 0xd9, 0xcf, 0x2e, 0xf2, 0xb6, + 0x9d, 0xf8, 0x3f, 0x92, 0xb3, 0xa5, 0xb8, 0xfc, 0x0f, 0x26, 0x2f, 0xc4, 0xe8, 0x07, 0x6e, 0x87, + 0x35, 0xf5, 0x9f, 0x72, 0x2a, 0x4e, 0x37, 0xca, 0xae, 0x71, 0x58, 0x46, 0x0e, 0xde, 0x9f, 0xbc, + 0x2b, 0xae, 0xad, 0xb8, 0x32, 0x94, 0x1d, 0xf4, 0x47, 0x54, 0x1b, 0x17, 0x91, 0x9d, 0xb2, 0xd7, + 0x17, 0x47, 0x66, 0xd2, 0x74, 0xab, 0xe2, 0xfb, 0x47, 0x4d, 0xa4, 0x2d, 0xd6, 0x9a, 0xac, 0x72, + 0xba, 0x26, 0xd6, 0x4d, 0x3a, 0x2e, 0x60, 0xbc, 0x43, 0x17, 0x8d, 0xb2, 0x21, 0x9d, 0x62, 0xb3, + 0xeb, 0xd2, 0xa4, 0x2b, 0x48, 0x50, 0xa7, 0xc1, 0xdf, 0x9e, 0xa5, 0xf8, 0xed, 0xef, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x9b, 0x8c, 0xe4, 0x6a, 0xba, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go new file mode 100644 index 0000000000..0c37ba2abe --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -0,0 +1,1196 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/handshaker.proto + +package grpc_gcp // import "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HandshakeProtocol int32 + +const ( + // Default value. + HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 + // TLS handshake protocol. + HandshakeProtocol_TLS HandshakeProtocol = 1 + // Application Layer Transport Security handshake protocol. + HandshakeProtocol_ALTS HandshakeProtocol = 2 +) + +var HandshakeProtocol_name = map[int32]string{ + 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", + 1: "TLS", + 2: "ALTS", +} +var HandshakeProtocol_value = map[string]int32{ + "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, + "TLS": 1, + "ALTS": 2, +} + +func (x HandshakeProtocol) String() string { + return proto.EnumName(HandshakeProtocol_name, int32(x)) +} +func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{0} +} + +type NetworkProtocol int32 + +const ( + NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 + NetworkProtocol_TCP NetworkProtocol = 1 + NetworkProtocol_UDP NetworkProtocol = 2 +) + +var NetworkProtocol_name = map[int32]string{ + 0: "NETWORK_PROTOCOL_UNSPECIFIED", + 1: "TCP", + 2: "UDP", +} +var NetworkProtocol_value = map[string]int32{ + "NETWORK_PROTOCOL_UNSPECIFIED": 0, + "TCP": 1, + "UDP": 2, +} + +func (x NetworkProtocol) String() string { + return proto.EnumName(NetworkProtocol_name, int32(x)) +} +func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{1} +} + +type Endpoint struct { + // IP address. It should contain an IPv4 or IPv6 string literal, e.g. + // "192.168.0.1" or "2001:db8::1". + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Port number. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // Network protocol (e.g., TCP, UDP) associated with this endpoint. 
+ Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (m *Endpoint) String() string { return proto.CompactTextString(m) } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Endpoint.Unmarshal(m, b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) +} +func (dst *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(dst, src) +} +func (m *Endpoint) XXX_Size() int { + return xxx_messageInfo_Endpoint.Size(m) +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *Endpoint) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *Endpoint) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Endpoint) GetProtocol() NetworkProtocol { + if m != nil { + return m.Protocol + } + return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED +} + +type Identity struct { + // Types that are valid to be assigned to IdentityOneof: + // *Identity_ServiceAccount + // *Identity_Hostname + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional attributes of the identity. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Identity) Reset() { *m = Identity{} } +func (m *Identity) String() string { return proto.CompactTextString(m) } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{1} +} +func (m *Identity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Identity.Unmarshal(m, b) +} +func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Identity.Marshal(b, m, deterministic) +} +func (dst *Identity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Identity.Merge(dst, src) +} +func (m *Identity) XXX_Size() int { + return xxx_messageInfo_Identity.Size(m) +} +func (m *Identity) XXX_DiscardUnknown() { + xxx_messageInfo_Identity.DiscardUnknown(m) +} + +var xxx_messageInfo_Identity proto.InternalMessageInfo + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_ServiceAccount struct { + ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` +} + +type Identity_Hostname struct { + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (m *Identity) GetServiceAccount() string { + if x, ok := 
m.GetIdentityOneof().(*Identity_ServiceAccount); ok { + return x.ServiceAccount + } + return "" +} + +func (m *Identity) GetHostname() string { + if x, ok := m.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (m *Identity) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Identity) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Identity_OneofMarshaler, _Identity_OneofUnmarshaler, _Identity_OneofSizer, []interface{}{ + (*Identity_ServiceAccount)(nil), + (*Identity_Hostname)(nil), + } +} + +func _Identity_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Identity) + // identity_oneof + switch x := m.IdentityOneof.(type) { + case *Identity_ServiceAccount: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ServiceAccount) + case *Identity_Hostname: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Hostname) + case nil: + default: + return fmt.Errorf("Identity.IdentityOneof has unexpected type %T", x) + } + return nil +} + +func _Identity_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Identity) + switch tag { + case 1: // identity_oneof.service_account + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdentityOneof = &Identity_ServiceAccount{x} + return true, err + case 2: // identity_oneof.hostname + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdentityOneof = &Identity_Hostname{x} + return true, err + default: + return false, nil + } +} + +func _Identity_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Identity) + // identity_oneof + switch x := m.IdentityOneof.(type) { + case *Identity_ServiceAccount: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ServiceAccount))) + n += len(x.ServiceAccount) + case *Identity_Hostname: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Hostname))) + n += len(x.Hostname) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type StartClientHandshakeReq struct { + // Handshake security protocol requested by the client. + HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` + // The application protocols supported by the client, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // The record protocols supported by the client, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, handshake will fail. 
+ TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` + // (Optional) Application may specify a local identity. Otherwise, the + // handshaker chooses a default local identity. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // (Optional) Local endpoint information of the connection to the server, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote server, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) If target name is provided, a secure naming check is performed + // to verify that the peer authenticated identity is indeed authorized to run + // the target name. + TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` + // (Optional) RPC protocol versions supported by the client. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClientHandshakeReq) Reset() { *m = StartClientHandshakeReq{} } +func (m *StartClientHandshakeReq) String() string { return proto.CompactTextString(m) } +func (*StartClientHandshakeReq) ProtoMessage() {} +func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{2} +} +func (m *StartClientHandshakeReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClientHandshakeReq.Unmarshal(m, b) +} +func (m *StartClientHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClientHandshakeReq.Marshal(b, m, deterministic) +} +func (dst *StartClientHandshakeReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClientHandshakeReq.Merge(dst, src) +} +func (m *StartClientHandshakeReq) XXX_Size() int { + return xxx_messageInfo_StartClientHandshakeReq.Size(m) +} +func (m *StartClientHandshakeReq) XXX_DiscardUnknown() { + xxx_messageInfo_StartClientHandshakeReq.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClientHandshakeReq proto.InternalMessageInfo + +func (m *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol { + if m != nil { + return m.HandshakeSecurityProtocol + } + return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED +} + +func (m *StartClientHandshakeReq) GetApplicationProtocols() []string { + if m != nil { + return m.ApplicationProtocols + } + return nil +} + +func (m *StartClientHandshakeReq) GetRecordProtocols() []string { + if m != nil { + return m.RecordProtocols + } + return nil +} + +func (m *StartClientHandshakeReq) GetTargetIdentities() []*Identity { + if m != nil { + return m.TargetIdentities + } + return nil +} + +func (m *StartClientHandshakeReq) GetLocalIdentity() *Identity { + if m != nil { + return m.LocalIdentity + } + return nil +} + +func (m *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { + if m != nil { + return m.LocalEndpoint + } + return nil +} + +func (m *StartClientHandshakeReq) GetRemoteEndpoint() 
*Endpoint { + if m != nil { + return m.RemoteEndpoint + } + return nil +} + +func (m *StartClientHandshakeReq) GetTargetName() string { + if m != nil { + return m.TargetName + } + return "" +} + +func (m *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.RpcVersions + } + return nil +} + +type ServerHandshakeParameters struct { + // The record protocols supported by the server, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. Otherwise, the handshaker chooses a default local identity. + LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHandshakeParameters) Reset() { *m = ServerHandshakeParameters{} } +func (m *ServerHandshakeParameters) String() string { return proto.CompactTextString(m) } +func (*ServerHandshakeParameters) ProtoMessage() {} +func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{3} +} +func (m *ServerHandshakeParameters) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHandshakeParameters.Unmarshal(m, b) +} +func (m *ServerHandshakeParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHandshakeParameters.Marshal(b, m, deterministic) +} +func (dst *ServerHandshakeParameters) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHandshakeParameters.Merge(dst, src) +} +func (m *ServerHandshakeParameters) XXX_Size() int { + return xxx_messageInfo_ServerHandshakeParameters.Size(m) +} +func (m *ServerHandshakeParameters) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHandshakeParameters.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHandshakeParameters proto.InternalMessageInfo + +func (m *ServerHandshakeParameters) GetRecordProtocols() []string { + if m != nil { + return m.RecordProtocols + } + return nil +} + +func (m *ServerHandshakeParameters) GetLocalIdentities() []*Identity { + if m != nil { + return m.LocalIdentities + } + return nil +} + +type StartServerHandshakeReq struct { + // The application protocols supported by the server, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // Handshake parameters (record protocols and local identities supported by + // the server) mapped by the handshake protocol. Each handshake security + // protocol (e.g., TLS or ALTS) has its own set of record protocols and local + // identities. Since protobuf does not support enum as key to the map, the key + // to handshake_parameters is the integer value of HandshakeProtocol enum. + HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple HandshakReq messages. 
+ InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // (Optional) Local endpoint information of the connection to the client, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote client, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) RPC protocol versions supported by the server. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartServerHandshakeReq) Reset() { *m = StartServerHandshakeReq{} } +func (m *StartServerHandshakeReq) String() string { return proto.CompactTextString(m) } +func (*StartServerHandshakeReq) ProtoMessage() {} +func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{4} +} +func (m *StartServerHandshakeReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartServerHandshakeReq.Unmarshal(m, b) +} +func (m *StartServerHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartServerHandshakeReq.Marshal(b, m, deterministic) +} +func (dst *StartServerHandshakeReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartServerHandshakeReq.Merge(dst, src) +} +func (m *StartServerHandshakeReq) XXX_Size() int { + return xxx_messageInfo_StartServerHandshakeReq.Size(m) +} +func (m *StartServerHandshakeReq) XXX_DiscardUnknown() { + xxx_messageInfo_StartServerHandshakeReq.DiscardUnknown(m) +} + +var xxx_messageInfo_StartServerHandshakeReq proto.InternalMessageInfo + +func (m *StartServerHandshakeReq) GetApplicationProtocols() []string { + if m != nil { + return m.ApplicationProtocols + } + return nil +} + +func (m *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { + if m != nil { + return m.HandshakeParameters + } + return nil +} + +func (m *StartServerHandshakeReq) GetInBytes() []byte { + if m != nil { + return m.InBytes + } + return nil +} + +func (m *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { + if m != nil { + return m.LocalEndpoint + } + return nil +} + +func (m *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { + if m != nil { + return m.RemoteEndpoint + } + return nil +} + +func (m *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.RpcVersions + } + return nil +} + +type NextHandshakeMessageReq struct { + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple NextHandshakerMessageReq + // messages. 
+ InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NextHandshakeMessageReq) Reset() { *m = NextHandshakeMessageReq{} } +func (m *NextHandshakeMessageReq) String() string { return proto.CompactTextString(m) } +func (*NextHandshakeMessageReq) ProtoMessage() {} +func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{5} +} +func (m *NextHandshakeMessageReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NextHandshakeMessageReq.Unmarshal(m, b) +} +func (m *NextHandshakeMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NextHandshakeMessageReq.Marshal(b, m, deterministic) +} +func (dst *NextHandshakeMessageReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextHandshakeMessageReq.Merge(dst, src) +} +func (m *NextHandshakeMessageReq) XXX_Size() int { + return xxx_messageInfo_NextHandshakeMessageReq.Size(m) +} +func (m *NextHandshakeMessageReq) XXX_DiscardUnknown() { + xxx_messageInfo_NextHandshakeMessageReq.DiscardUnknown(m) +} + +var xxx_messageInfo_NextHandshakeMessageReq proto.InternalMessageInfo + +func (m *NextHandshakeMessageReq) GetInBytes() []byte { + if m != nil { + return m.InBytes + } + return nil +} + +type HandshakerReq struct { + // Types that are valid to be assigned to ReqOneof: + // *HandshakerReq_ClientStart + // *HandshakerReq_ServerStart + // *HandshakerReq_Next + ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerReq) Reset() { *m = HandshakerReq{} } +func (m *HandshakerReq) String() string { return proto.CompactTextString(m) } +func (*HandshakerReq) ProtoMessage() {} +func (*HandshakerReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{6} +} +func (m *HandshakerReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerReq.Unmarshal(m, b) +} +func (m *HandshakerReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerReq.Marshal(b, m, deterministic) +} +func (dst *HandshakerReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerReq.Merge(dst, src) +} +func (m *HandshakerReq) XXX_Size() int { + return xxx_messageInfo_HandshakerReq.Size(m) +} +func (m *HandshakerReq) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerReq.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerReq proto.InternalMessageInfo + +type isHandshakerReq_ReqOneof interface { + isHandshakerReq_ReqOneof() +} + +type HandshakerReq_ClientStart struct { + ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type HandshakerReq_ServerStart struct { + ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type HandshakerReq_Next struct { + Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} + +func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func 
(m *HandshakerReq) GetClientStart() *StartClientHandshakeReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (m *HandshakerReq) GetServerStart() *StartServerHandshakeReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (m *HandshakerReq) GetNext() *NextHandshakeMessageReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_Next); ok { + return x.Next + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HandshakerReq) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HandshakerReq_OneofMarshaler, _HandshakerReq_OneofUnmarshaler, _HandshakerReq_OneofSizer, []interface{}{ + (*HandshakerReq_ClientStart)(nil), + (*HandshakerReq_ServerStart)(nil), + (*HandshakerReq_Next)(nil), + } +} + +func _HandshakerReq_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HandshakerReq) + // req_oneof + switch x := m.ReqOneof.(type) { + case *HandshakerReq_ClientStart: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientStart); err != nil { + return err + } + case *HandshakerReq_ServerStart: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerStart); err != nil { + return err + } + case *HandshakerReq_Next: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Next); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HandshakerReq.ReqOneof has unexpected type %T", x) + } + return nil +} + +func _HandshakerReq_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HandshakerReq) + switch tag { + case 1: // req_oneof.client_start + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StartClientHandshakeReq) + err := b.DecodeMessage(msg) + m.ReqOneof = &HandshakerReq_ClientStart{msg} + return true, err + case 2: // req_oneof.server_start + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StartServerHandshakeReq) + err := b.DecodeMessage(msg) + m.ReqOneof = &HandshakerReq_ServerStart{msg} + return true, err + case 3: // req_oneof.next + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NextHandshakeMessageReq) + err := b.DecodeMessage(msg) + m.ReqOneof = &HandshakerReq_Next{msg} + return true, err + default: + return false, nil + } +} + +func _HandshakerReq_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HandshakerReq) + // req_oneof + switch x := m.ReqOneof.(type) { + case *HandshakerReq_ClientStart: + s := proto.Size(x.ClientStart) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *HandshakerReq_ServerStart: + s := proto.Size(x.ServerStart) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *HandshakerReq_Next: + s := proto.Size(x.Next) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type HandshakerResult struct { + // The application protocol negotiated for this connection. 
+ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // Cryptographic key data. The key data may be more than the key length + // required for the record protocol, thus the client of the handshaker + // service needs to truncate the key data into the right key length. + KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used in the handshake. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // Indicate whether the handshaker service client should keep the channel + // between the handshaker service open, e.g., in order to handle + // post-handshake messages in the future. + KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerResult) Reset() { *m = HandshakerResult{} } +func (m *HandshakerResult) String() string { return proto.CompactTextString(m) } +func (*HandshakerResult) ProtoMessage() {} +func (*HandshakerResult) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{7} +} +func (m *HandshakerResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerResult.Unmarshal(m, b) +} +func (m *HandshakerResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerResult.Marshal(b, m, deterministic) +} +func (dst *HandshakerResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerResult.Merge(dst, src) +} +func (m *HandshakerResult) XXX_Size() int { + return xxx_messageInfo_HandshakerResult.Size(m) +} +func (m *HandshakerResult) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerResult.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerResult proto.InternalMessageInfo + +func (m *HandshakerResult) GetApplicationProtocol() string { + if m != nil { + return m.ApplicationProtocol + } + return "" +} + +func (m *HandshakerResult) GetRecordProtocol() string { + if m != nil { + return m.RecordProtocol + } + return "" +} + +func (m *HandshakerResult) GetKeyData() []byte { + if m != nil { + return m.KeyData + } + return nil +} + +func (m *HandshakerResult) GetPeerIdentity() *Identity { + if m != nil { + return m.PeerIdentity + } + return nil +} + +func (m *HandshakerResult) GetLocalIdentity() *Identity { + if m != nil { + return m.LocalIdentity + } + return nil +} + +func (m *HandshakerResult) GetKeepChannelOpen() bool { + if m != nil { + return m.KeepChannelOpen + } + return false +} + +func (m *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.PeerRpcVersions + } + return nil +} + +type HandshakerStatus struct { + // The status code. 
This could be the gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerStatus) Reset() { *m = HandshakerStatus{} } +func (m *HandshakerStatus) String() string { return proto.CompactTextString(m) } +func (*HandshakerStatus) ProtoMessage() {} +func (*HandshakerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{8} +} +func (m *HandshakerStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerStatus.Unmarshal(m, b) +} +func (m *HandshakerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerStatus.Marshal(b, m, deterministic) +} +func (dst *HandshakerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerStatus.Merge(dst, src) +} +func (m *HandshakerStatus) XXX_Size() int { + return xxx_messageInfo_HandshakerStatus.Size(m) +} +func (m *HandshakerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerStatus proto.InternalMessageInfo + +func (m *HandshakerStatus) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *HandshakerStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +type HandshakerResp struct { + // Frames to be given to the peer for the NextHandshakeMessageReq. May be + // empty if no out_frames have to be sent to the peer or if in_bytes in the + // HandshakerReq are incomplete. All the non-empty out frames must be sent to + // the peer even if the handshaker status is not OK as these frames may + // contain the alert frames. + OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes consumed by the handshaker. It is possible + // that part of in_bytes in HandshakerReq was unrelated to the handshake + // process. + BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set iff the handshake was successful. out_frames may still be set + // to frames that needs to be forwarded to the peer. + Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + // Status of the handshaker. 
+ Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerResp) Reset() { *m = HandshakerResp{} } +func (m *HandshakerResp) String() string { return proto.CompactTextString(m) } +func (*HandshakerResp) ProtoMessage() {} +func (*HandshakerResp) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{9} +} +func (m *HandshakerResp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerResp.Unmarshal(m, b) +} +func (m *HandshakerResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerResp.Marshal(b, m, deterministic) +} +func (dst *HandshakerResp) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerResp.Merge(dst, src) +} +func (m *HandshakerResp) XXX_Size() int { + return xxx_messageInfo_HandshakerResp.Size(m) +} +func (m *HandshakerResp) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerResp.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerResp proto.InternalMessageInfo + +func (m *HandshakerResp) GetOutFrames() []byte { + if m != nil { + return m.OutFrames + } + return nil +} + +func (m *HandshakerResp) GetBytesConsumed() uint32 { + if m != nil { + return m.BytesConsumed + } + return 0 +} + +func (m *HandshakerResp) GetResult() *HandshakerResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *HandshakerResp) GetStatus() *HandshakerStatus { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*Endpoint)(nil), "grpc.gcp.Endpoint") + proto.RegisterType((*Identity)(nil), "grpc.gcp.Identity") + proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.Identity.AttributesEntry") + proto.RegisterType((*StartClientHandshakeReq)(nil), "grpc.gcp.StartClientHandshakeReq") + proto.RegisterType((*ServerHandshakeParameters)(nil), "grpc.gcp.ServerHandshakeParameters") + proto.RegisterType((*StartServerHandshakeReq)(nil), "grpc.gcp.StartServerHandshakeReq") + proto.RegisterMapType((map[int32]*ServerHandshakeParameters)(nil), "grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry") + proto.RegisterType((*NextHandshakeMessageReq)(nil), "grpc.gcp.NextHandshakeMessageReq") + proto.RegisterType((*HandshakerReq)(nil), "grpc.gcp.HandshakerReq") + proto.RegisterType((*HandshakerResult)(nil), "grpc.gcp.HandshakerResult") + proto.RegisterType((*HandshakerStatus)(nil), "grpc.gcp.HandshakerStatus") + proto.RegisterType((*HandshakerResp)(nil), "grpc.gcp.HandshakerResp") + proto.RegisterEnum("grpc.gcp.HandshakeProtocol", HandshakeProtocol_name, HandshakeProtocol_value) + proto.RegisterEnum("grpc.gcp.NetworkProtocol", NetworkProtocol_name, NetworkProtocol_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// HandshakerServiceClient is the client API for HandshakerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type HandshakerServiceClient interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. 
Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. + DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) +} + +type handshakerServiceClient struct { + cc *grpc.ClientConn +} + +func NewHandshakerServiceClient(cc *grpc.ClientConn) HandshakerServiceClient { + return &handshakerServiceClient{cc} +} + +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { + stream, err := c.cc.NewStream(ctx, &_HandshakerService_serviceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + if err != nil { + return nil, err + } + x := &handshakerServiceDoHandshakeClient{stream} + return x, nil +} + +type HandshakerService_DoHandshakeClient interface { + Send(*HandshakerReq) error + Recv() (*HandshakerResp, error) + grpc.ClientStream +} + +type handshakerServiceDoHandshakeClient struct { + grpc.ClientStream +} + +func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { + m := new(HandshakerResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HandshakerServiceServer is the server API for HandshakerService service. +type HandshakerServiceServer interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. 
+ DoHandshake(HandshakerService_DoHandshakeServer) error +} + +func RegisterHandshakerServiceServer(s *grpc.Server, srv HandshakerServiceServer) { + s.RegisterService(&_HandshakerService_serviceDesc, srv) +} + +func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) +} + +type HandshakerService_DoHandshakeServer interface { + Send(*HandshakerResp) error + Recv() (*HandshakerReq, error) + grpc.ServerStream +} + +type handshakerServiceDoHandshakeServer struct { + grpc.ServerStream +} + +func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { + m := new(HandshakerReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _HandshakerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.gcp.HandshakerService", + HandlerType: (*HandshakerServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoHandshake", + Handler: _HandshakerService_DoHandshake_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/gcp/handshaker.proto", +} + +func init() { + proto.RegisterFile("grpc/gcp/handshaker.proto", fileDescriptor_handshaker_1dfe659b12ea825e) +} + +var fileDescriptor_handshaker_1dfe659b12ea825e = []byte{ + // 1168 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x6e, 0x1a, 0xc7, + 0x17, 0xf6, 0x02, 0xb6, 0xf1, 0xc1, 0xfc, 0xf1, 0xc4, 0x51, 0xd6, 0x4e, 0xf2, 0xfb, 0x51, 0xaa, + 0xaa, 0x24, 0x17, 0xd0, 0x92, 0x56, 0x69, 0x52, 0x45, 0x09, 0x60, 0x2c, 0xdc, 0xa4, 0x18, 0x2d, + 0x4e, 0x2b, 0x35, 0x17, 0xab, 0xc9, 0x32, 0xc1, 0x2b, 0x96, 0x99, 0xf5, 0xcc, 0xe0, 0x86, 0x07, + 0xe8, 0xe3, 0xf4, 0x15, 0xfa, 0x36, 0x95, 0xfa, 0x00, 0xbd, 0x6f, 0xb5, 0xb3, 0xb3, 0x7f, 0xc0, + 0x10, 0x25, 0xea, 0xdd, 0xee, 0x99, 0xef, 0x3b, 0x7b, 0xe6, 0x3b, 0xdf, 0x9c, 0x1d, 0x38, 0x9a, + 0x70, 0xdf, 0x69, 0x4e, 0x1c, 0xbf, 0x79, 0x89, 0xe9, 0x58, 0x5c, 0xe2, 0x29, 0xe1, 0x0d, 0x9f, + 0x33, 0xc9, 0x50, 0x3e, 0x58, 0x6a, 0x4c, 0x1c, 0xff, 0xb8, 0x1e, 0x83, 0x24, 0xc7, 0x54, 0xf8, + 0x8c, 0x4b, 0x5b, 0x10, 0x67, 0xce, 0x5d, 0xb9, 0xb0, 0x1d, 0x36, 0x9b, 0x31, 0x1a, 0x72, 0x6a, + 0x12, 0xf2, 0x3d, 0x3a, 0xf6, 0x99, 0x4b, 0x25, 0xba, 0x0f, 0xe0, 0xfa, 0x36, 0x1e, 0x8f, 0x39, + 0x11, 0xc2, 0x34, 0xaa, 0x46, 0x7d, 0xcf, 0xda, 0x73, 0xfd, 0x76, 0x18, 0x40, 0x08, 0x72, 0x41, + 0x22, 0x33, 0x53, 0x35, 0xea, 0xdb, 0x96, 0x7a, 0x46, 0xdf, 0x42, 0x5e, 0xe5, 0x71, 0x98, 0x67, + 0x66, 0xab, 0x46, 0xbd, 0xd4, 0x3a, 0x6a, 0x44, 0x55, 0x34, 0x06, 0x44, 0xfe, 0xca, 0xf8, 0x74, + 0xa8, 0x01, 0x56, 0x0c, 0xad, 0xfd, 0x65, 0x40, 0xfe, 0x6c, 0x4c, 0xa8, 0x74, 0xe5, 0x02, 0x3d, + 0x80, 0xb2, 0x20, 0xfc, 0xda, 0x75, 0x88, 0x8d, 0x1d, 0x87, 0xcd, 0xa9, 0x0c, 0xbf, 0xdd, 0xdf, + 0xb2, 0x4a, 0x7a, 0xa1, 0x1d, 0xc6, 0xd1, 0x3d, 0xc8, 0x5f, 0x32, 0x21, 0x29, 0x9e, 0x11, 0x55, + 0x46, 0x80, 0x89, 0x23, 0xa8, 0x03, 0x80, 0xa5, 0xe4, 0xee, 0xdb, 0xb9, 0x24, 0xc2, 0xcc, 0x56, + 0xb3, 0xf5, 0x42, 0xab, 0x96, 0x94, 0x13, 0x7d, 0xb0, 0xd1, 0x8e, 0x41, 0x3d, 0x2a, 0xf9, 0xc2, + 0x4a, 0xb1, 0x8e, 0x9f, 0x41, 0x79, 0x65, 0x19, 0x55, 0x20, 0x3b, 0x25, 0x0b, 0xad, 0x47, 0xf0, + 0x88, 0x0e, 0x61, 0xfb, 0x1a, 0x7b, 0x73, 0x5d, 0x83, 0x15, 0xbe, 0x3c, 0xcd, 0x7c, 0x67, 0x74, + 0x2a, 0x50, 0x72, 0xf5, 0x67, 
0x6c, 0x46, 0x09, 0x7b, 0x57, 0xfb, 0x3d, 0x07, 0x77, 0x46, 0x12, + 0x73, 0xd9, 0xf5, 0x5c, 0x42, 0x65, 0x3f, 0x6a, 0x9a, 0x45, 0xae, 0xd0, 0x1b, 0xb8, 0x1b, 0x37, + 0x31, 0xe9, 0x4f, 0x2c, 0xa8, 0xa1, 0x04, 0xbd, 0x9b, 0xec, 0x20, 0x26, 0xc7, 0x92, 0x1e, 0xc5, + 0xfc, 0x91, 0xa6, 0x47, 0x4b, 0xe8, 0x11, 0xdc, 0xc6, 0xbe, 0xef, 0xb9, 0x0e, 0x96, 0x2e, 0xa3, + 0x71, 0x56, 0x61, 0x66, 0xaa, 0xd9, 0xfa, 0x9e, 0x75, 0x98, 0x5a, 0x8c, 0x38, 0x02, 0x3d, 0x80, + 0x0a, 0x27, 0x0e, 0xe3, 0xe3, 0x14, 0x3e, 0xab, 0xf0, 0xe5, 0x30, 0x9e, 0x40, 0x9f, 0xc3, 0x81, + 0xc4, 0x7c, 0x42, 0xa4, 0xad, 0x77, 0xec, 0x12, 0x61, 0xe6, 0x94, 0xe8, 0xe8, 0xa6, 0xe8, 0x56, + 0x25, 0x04, 0x9f, 0xc5, 0x58, 0xf4, 0x04, 0x4a, 0x1e, 0x73, 0xb0, 0x17, 0xf1, 0x17, 0xe6, 0x76, + 0xd5, 0xd8, 0xc0, 0x2e, 0x2a, 0x64, 0x6c, 0x99, 0x98, 0x4a, 0xb4, 0x77, 0xcd, 0x9d, 0x55, 0x6a, + 0xe4, 0x6a, 0x4d, 0x8d, 0x4d, 0xfe, 0x3d, 0x94, 0x39, 0x99, 0x31, 0x49, 0x12, 0xee, 0xee, 0x46, + 0x6e, 0x29, 0x84, 0xc6, 0xe4, 0xff, 0x43, 0x41, 0xef, 0x59, 0x59, 0x30, 0xaf, 0xda, 0x0f, 0x61, + 0x68, 0x10, 0x58, 0xf0, 0x05, 0xec, 0x73, 0xdf, 0xb1, 0xaf, 0x09, 0x17, 0x2e, 0xa3, 0xc2, 0xdc, + 0x53, 0xa9, 0xef, 0x27, 0xa9, 0x2d, 0xdf, 0x89, 0x24, 0xfc, 0x49, 0x83, 0xac, 0x02, 0xf7, 0x9d, + 0xe8, 0xa5, 0xf6, 0x9b, 0x01, 0x47, 0x23, 0xc2, 0xaf, 0x09, 0x4f, 0xba, 0x8d, 0x39, 0x9e, 0x11, + 0x49, 0xf8, 0xfa, 0xfe, 0x18, 0xeb, 0xfb, 0xf3, 0x0c, 0x2a, 0x4b, 0xf2, 0x06, 0xed, 0xc9, 0x6c, + 0x6c, 0x4f, 0x39, 0x2d, 0xb0, 0x4b, 0x44, 0xed, 0x9f, 0xac, 0xf6, 0xed, 0x4a, 0x31, 0x81, 0x6f, + 0x37, 0x5a, 0xcb, 0xf8, 0x80, 0xb5, 0x66, 0x70, 0x98, 0x98, 0xdd, 0x8f, 0xb7, 0xa4, 0x6b, 0x7a, + 0x9a, 0xd4, 0xb4, 0xe1, 0xab, 0x8d, 0x35, 0x7a, 0x84, 0xe7, 0xf7, 0xd6, 0xe5, 0x1a, 0xa5, 0x8e, + 0x20, 0xef, 0x52, 0xfb, 0xed, 0x22, 0x1c, 0x05, 0x46, 0x7d, 0xdf, 0xda, 0x75, 0x69, 0x27, 0x78, + 0x5d, 0xe3, 0x9e, 0xdc, 0x7f, 0x70, 0xcf, 0xf6, 0x47, 0xbb, 0x67, 0xd5, 0x1c, 0x3b, 0x9f, 0x6a, + 0x8e, 0xe3, 0x29, 0x98, 0x9b, 0x54, 0x48, 0x8f, 0xa9, 0xed, 0x70, 0x4c, 0x3d, 0x49, 0x8f, 0xa9, + 0x42, 0xeb, 0xf3, 0x94, 0xc4, 0x9b, 0x0c, 0x96, 0x9a, 0x65, 0xb5, 0x6f, 0xe0, 0xce, 0x80, 0xbc, + 0x4f, 0x26, 0xd6, 0x8f, 0x44, 0x08, 0x3c, 0x51, 0x06, 0x48, 0x8b, 0x6b, 0x2c, 0x89, 0x5b, 0xfb, + 0xd3, 0x80, 0x62, 0x4c, 0xe1, 0x01, 0xf8, 0x14, 0xf6, 0x1d, 0x35, 0xfb, 0x6c, 0x11, 0x74, 0x56, + 0x11, 0x0a, 0xad, 0xcf, 0x56, 0x1a, 0x7e, 0x73, 0x3c, 0xf6, 0xb7, 0xac, 0x42, 0x48, 0x54, 0x80, + 0x20, 0x8f, 0x50, 0x75, 0xeb, 0x3c, 0x99, 0xb5, 0x79, 0x6e, 0x1a, 0x27, 0xc8, 0x13, 0x12, 0xc3, + 0x3c, 0x8f, 0x21, 0x47, 0xc9, 0x7b, 0xa9, 0x5c, 0xb1, 0xc4, 0xdf, 0xb0, 0xdb, 0xfe, 0x96, 0xa5, + 0x08, 0x9d, 0x02, 0xec, 0x71, 0x72, 0xa5, 0xe7, 0xfa, 0xdf, 0x19, 0xa8, 0xa4, 0xf7, 0x29, 0xe6, + 0x9e, 0x44, 0x5f, 0xc3, 0xe1, 0xba, 0x83, 0xa1, 0xff, 0x1d, 0xb7, 0xd6, 0x9c, 0x0b, 0xf4, 0x25, + 0x94, 0x57, 0x4e, 0xb4, 0xfe, 0xab, 0x94, 0x96, 0x0f, 0x74, 0xa0, 0xf9, 0x94, 0x2c, 0xec, 0x31, + 0x96, 0x38, 0x32, 0xf4, 0x94, 0x2c, 0x4e, 0xb0, 0xc4, 0xe8, 0x31, 0x14, 0x7d, 0x42, 0x78, 0x32, + 0x48, 0x73, 0x1b, 0x07, 0xe9, 0x7e, 0x00, 0xbc, 0x39, 0x47, 0x3f, 0x7d, 0x04, 0x3f, 0x84, 0x83, + 0x29, 0x21, 0xbe, 0xed, 0x5c, 0x62, 0x4a, 0x89, 0x67, 0x33, 0x9f, 0x50, 0xe5, 0xe8, 0xbc, 0x55, + 0x0e, 0x16, 0xba, 0x61, 0xfc, 0xdc, 0x27, 0x14, 0x9d, 0xc1, 0x81, 0xaa, 0x6f, 0xc9, 0xfd, 0xbb, + 0x1f, 0xe3, 0xfe, 0x72, 0xc0, 0xb3, 0x52, 0xe3, 0xf1, 0x45, 0x5a, 0xf5, 0x91, 0xc4, 0x72, 0xae, + 0x2e, 0x26, 0x0e, 0x1b, 0x13, 0xa5, 0x72, 0xd1, 0x52, 0xcf, 0xc8, 0x84, 0xdd, 0x31, 0x91, 0xd8, + 0x55, 0xff, 0xbb, 0x40, 0xce, 0xe8, 0xb5, 0xf6, 0x87, 
0x01, 0xa5, 0xa5, 0xc6, 0xf9, 0xc1, 0xc5, + 0x87, 0xcd, 0xa5, 0xfd, 0x2e, 0x38, 0x05, 0x91, 0xa1, 0xf7, 0xd8, 0x5c, 0x9e, 0xaa, 0x00, 0xfa, + 0x02, 0x4a, 0xca, 0xea, 0xb6, 0xc3, 0xa8, 0x98, 0xcf, 0xc8, 0x58, 0xa5, 0x2c, 0x5a, 0x45, 0x15, + 0xed, 0xea, 0x20, 0x6a, 0xc1, 0x0e, 0x57, 0x36, 0xd0, 0xce, 0x3a, 0x5e, 0xf3, 0xe3, 0xd6, 0x46, + 0xb1, 0x34, 0x32, 0xe0, 0x08, 0xb5, 0x09, 0xdd, 0xb2, 0xb5, 0x9c, 0x70, 0x9b, 0x96, 0x46, 0x3e, + 0xfc, 0x01, 0x0e, 0x6e, 0x5c, 0x04, 0x50, 0x0d, 0xfe, 0xd7, 0x6f, 0x0f, 0x4e, 0x46, 0xfd, 0xf6, + 0xcb, 0x9e, 0x3d, 0xb4, 0xce, 0x2f, 0xce, 0xbb, 0xe7, 0xaf, 0xec, 0xd7, 0x83, 0xd1, 0xb0, 0xd7, + 0x3d, 0x3b, 0x3d, 0xeb, 0x9d, 0x54, 0xb6, 0xd0, 0x2e, 0x64, 0x2f, 0x5e, 0x8d, 0x2a, 0x06, 0xca, + 0x43, 0xae, 0xfd, 0xea, 0x62, 0x54, 0xc9, 0x3c, 0xec, 0x41, 0x79, 0xe5, 0x96, 0x86, 0xaa, 0x70, + 0x6f, 0xd0, 0xbb, 0xf8, 0xf9, 0xdc, 0x7a, 0xf9, 0xa1, 0x3c, 0xdd, 0x61, 0xc5, 0x08, 0x1e, 0x5e, + 0x9f, 0x0c, 0x2b, 0x99, 0xd6, 0x9b, 0x54, 0x49, 0x7c, 0x14, 0xde, 0xd9, 0xd0, 0x29, 0x14, 0x4e, + 0x58, 0x1c, 0x46, 0x77, 0xd6, 0xcb, 0x71, 0x75, 0x6c, 0x6e, 0xd0, 0xc9, 0xaf, 0x6d, 0xd5, 0x8d, + 0xaf, 0x8c, 0xce, 0x14, 0x6e, 0xbb, 0x2c, 0xc4, 0x60, 0x4f, 0x8a, 0x86, 0x4b, 0x25, 0xe1, 0x14, + 0x7b, 0x9d, 0x72, 0x02, 0x57, 0xd5, 0x0f, 0x8d, 0x5f, 0x9e, 0x4f, 0x18, 0x9b, 0x78, 0xa4, 0x31, + 0x61, 0x1e, 0xa6, 0x93, 0x06, 0xe3, 0x93, 0xa6, 0xba, 0x0a, 0x3b, 0x9c, 0x28, 0xe3, 0x62, 0x4f, + 0x34, 0x83, 0x24, 0xcd, 0x28, 0x49, 0x53, 0x9d, 0x3a, 0x05, 0xb2, 0x27, 0x8e, 0xff, 0x76, 0x47, + 0xbd, 0x3f, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x37, 0x34, 0x9b, 0x67, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go new file mode 100644 index 0000000000..27510d4de9 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/transport_security_common.proto + +package grpc_gcp // import "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The security level of the created channel. The list is sorted in increasing +// level of security. This order must always be maintained. 
+type SecurityLevel int32 + +const ( + SecurityLevel_SECURITY_NONE SecurityLevel = 0 + SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 + SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 +) + +var SecurityLevel_name = map[int32]string{ + 0: "SECURITY_NONE", + 1: "INTEGRITY_ONLY", + 2: "INTEGRITY_AND_PRIVACY", +} +var SecurityLevel_value = map[string]int32{ + "SECURITY_NONE": 0, + "INTEGRITY_ONLY": 1, + "INTEGRITY_AND_PRIVACY": 2, +} + +func (x SecurityLevel) String() string { + return proto.EnumName(SecurityLevel_name, int32(x)) +} +func (SecurityLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_transport_security_common_71945991f2c3b4a6, []int{0} +} + +// Max and min supported RPC protocol versions. +type RpcProtocolVersions struct { + // Maximum supported RPC version. + MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` + // Minimum supported RPC version. + MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RpcProtocolVersions) Reset() { *m = RpcProtocolVersions{} } +func (m *RpcProtocolVersions) String() string { return proto.CompactTextString(m) } +func (*RpcProtocolVersions) ProtoMessage() {} +func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_transport_security_common_71945991f2c3b4a6, []int{0} +} +func (m *RpcProtocolVersions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RpcProtocolVersions.Unmarshal(m, b) +} +func (m *RpcProtocolVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcProtocolVersions.Marshal(b, m, deterministic) +} +func (dst *RpcProtocolVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcProtocolVersions.Merge(dst, src) +} +func (m *RpcProtocolVersions) XXX_Size() int { + return xxx_messageInfo_RpcProtocolVersions.Size(m) +} +func (m *RpcProtocolVersions) XXX_DiscardUnknown() { + xxx_messageInfo_RpcProtocolVersions.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcProtocolVersions proto.InternalMessageInfo + +func (m *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { + if m != nil { + return m.MaxRpcVersion + } + return nil +} + +func (m *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { + if m != nil { + return m.MinRpcVersion + } + return nil +} + +// RPC version contains a major version and a minor version. 
+type RpcProtocolVersions_Version struct { + Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RpcProtocolVersions_Version) Reset() { *m = RpcProtocolVersions_Version{} } +func (m *RpcProtocolVersions_Version) String() string { return proto.CompactTextString(m) } +func (*RpcProtocolVersions_Version) ProtoMessage() {} +func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { + return fileDescriptor_transport_security_common_71945991f2c3b4a6, []int{0, 0} +} +func (m *RpcProtocolVersions_Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RpcProtocolVersions_Version.Unmarshal(m, b) +} +func (m *RpcProtocolVersions_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcProtocolVersions_Version.Marshal(b, m, deterministic) +} +func (dst *RpcProtocolVersions_Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcProtocolVersions_Version.Merge(dst, src) +} +func (m *RpcProtocolVersions_Version) XXX_Size() int { + return xxx_messageInfo_RpcProtocolVersions_Version.Size(m) +} +func (m *RpcProtocolVersions_Version) XXX_DiscardUnknown() { + xxx_messageInfo_RpcProtocolVersions_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcProtocolVersions_Version proto.InternalMessageInfo + +func (m *RpcProtocolVersions_Version) GetMajor() uint32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *RpcProtocolVersions_Version) GetMinor() uint32 { + if m != nil { + return m.Minor + } + return 0 +} + +func init() { + proto.RegisterType((*RpcProtocolVersions)(nil), "grpc.gcp.RpcProtocolVersions") + proto.RegisterType((*RpcProtocolVersions_Version)(nil), "grpc.gcp.RpcProtocolVersions.Version") + proto.RegisterEnum("grpc.gcp.SecurityLevel", SecurityLevel_name, SecurityLevel_value) +} + +func init() { + proto.RegisterFile("grpc/gcp/transport_security_common.proto", fileDescriptor_transport_security_common_71945991f2c3b4a6) +} + +var fileDescriptor_transport_security_common_71945991f2c3b4a6 = []byte{ + // 323 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x41, 0x4b, 0x3b, 0x31, + 0x10, 0xc5, 0xff, 0x5b, 0xf8, 0xab, 0x44, 0x56, 0xeb, 0x6a, 0x41, 0xc5, 0x83, 0x08, 0x42, 0xf1, + 0x90, 0x05, 0xc5, 0xb3, 0xb4, 0xb5, 0x48, 0xa1, 0x6e, 0xeb, 0xb6, 0x16, 0xea, 0x25, 0xc4, 0x18, + 0x42, 0x24, 0x9b, 0x09, 0xb3, 0xb1, 0xd4, 0xaf, 0xec, 0xa7, 0x90, 0x4d, 0xbb, 0x14, 0xc1, 0x8b, + 0xb7, 0xbc, 0xc7, 0xcc, 0x6f, 0x32, 0xf3, 0x48, 0x5b, 0xa1, 0x13, 0xa9, 0x12, 0x2e, 0xf5, 0xc8, + 0x6d, 0xe9, 0x00, 0x3d, 0x2b, 0xa5, 0xf8, 0x40, 0xed, 0x3f, 0x99, 0x80, 0xa2, 0x00, 0x4b, 0x1d, + 0x82, 0x87, 0x64, 0xa7, 0xaa, 0xa4, 0x4a, 0xb8, 0x8b, 0xaf, 0x88, 0x1c, 0xe6, 0x4e, 0x8c, 0x2b, + 0x5b, 0x80, 0x99, 0x49, 0x2c, 0x35, 0xd8, 0x32, 0x79, 0x24, 0xfb, 0x05, 0x5f, 0x32, 0x74, 0x82, + 0x2d, 0x56, 0xde, 0x71, 0x74, 0x1e, 0xb5, 0x77, 0xaf, 0x2f, 0x69, 0xdd, 0x4b, 0x7f, 0xe9, 0xa3, + 0xeb, 0x47, 0x1e, 0x17, 0x7c, 0x99, 0x3b, 0xb1, 0x96, 0x01, 0xa7, 0xed, 0x0f, 0x5c, 0xe3, 0x6f, + 0x38, 0x6d, 0x37, 0xb8, 0xd3, 0x5b, 0xb2, 0x5d, 0x93, 0x8f, 0xc8, 0xff, 0x82, 0xbf, 0x03, 0x86, + 0xef, 0xc5, 0xf9, 0x4a, 0x04, 0x57, 0x5b, 0xc0, 0x30, 0xa5, 0x72, 0x2b, 0x71, 0xf5, 0x44, 0xe2, + 0xc9, 0xfa, 0x1e, 0x43, 0xb9, 0x90, 0x26, 0x39, 0x20, 0xf1, 0xa4, 0xdf, 0x7b, 0xce, 0x07, 0xd3, + 
0x39, 0xcb, 0x46, 0x59, 0xbf, 0xf9, 0x2f, 0x49, 0xc8, 0xde, 0x20, 0x9b, 0xf6, 0x1f, 0x82, 0x37, + 0xca, 0x86, 0xf3, 0x66, 0x94, 0x9c, 0x90, 0xd6, 0xc6, 0xeb, 0x64, 0xf7, 0x6c, 0x9c, 0x0f, 0x66, + 0x9d, 0xde, 0xbc, 0xd9, 0xe8, 0x2e, 0x49, 0x4b, 0xc3, 0x6a, 0x07, 0x6e, 0x7c, 0x49, 0xb5, 0xf5, + 0x12, 0x2d, 0x37, 0xdd, 0xb3, 0x69, 0x9d, 0x41, 0x3d, 0xb2, 0x17, 0x12, 0x08, 0x2b, 0x8e, 0xa3, + 0x97, 0x3b, 0x05, 0xa0, 0x8c, 0xa4, 0x0a, 0x0c, 0xb7, 0x8a, 0x02, 0xaa, 0x34, 0xc4, 0x27, 0x50, + 0xbe, 0x49, 0xeb, 0x35, 0x37, 0x65, 0x5a, 0x11, 0xd3, 0x9a, 0x98, 0x86, 0xe8, 0x42, 0x11, 0x53, + 0xc2, 0xbd, 0x6e, 0x05, 0x7d, 0xf3, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x31, 0x14, 0xb4, 0x11, 0xf6, + 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/regenerate.sh b/vendor/google.golang.org/grpc/credentials/alts/internal/regenerate.sh new file mode 100644 index 0000000000..a79c4201b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/regenerate.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +TMP=$(mktemp -d) + +function finish { + rm -rf "$TMP" +} +trap finish EXIT + +pushd "$TMP" +mkdir -p grpc/gcp +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/altscontext.proto > grpc/gcp/altscontext.proto +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/handshaker.proto > grpc/gcp/handshaker.proto +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/transport_security_common.proto > grpc/gcp/transport_security_common.proto + +protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/gcp/*.proto +popd +rm -f proto/grpc_gcp/*.pb.go +cp "$TMP"/grpc/gcp/*.pb.go proto/grpc_gcp/ + diff --git a/vendor/google.golang.org/grpc/credentials/alts/utils.go b/vendor/google.golang.org/grpc/credentials/alts/utils.go new file mode 100644 index 0000000000..4ed27c605b --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/utils.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package alts + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + + "google.golang.org/grpc/peer" +) + +const ( + linuxProductNameFile = "/sys/class/dmi/id/product_name" + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +type platformError string + +func (k platformError) Error() string { + return fmt.Sprintf("%s is not supported", string(k)) +} + +var ( + // The following two variables will be reassigned in tests. + runningOS = runtime.GOOS + manufacturerReader = func() (io.Reader, error) { + switch runningOS { + case "linux": + return os.Open(linuxProductNameFile) + case "windows": + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return strings.NewReader(name), nil + } + } + + return nil, errors.New("cannot determine the machine's manufacturer") + default: + return nil, platformError(runningOS) + } + } + vmOnGCP bool +) + +// isRunningOnGCP checks whether the local system, without doing a network request is +// running on GCP. +func isRunningOnGCP() bool { + manufacturer, err := readManufacturer() + if err != nil { + log.Fatalf("failure to read manufacturer information: %v", err) + } + name := string(manufacturer) + switch runningOS { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + log.Fatal(platformError(runningOS)) + } + return false +} + +func readManufacturer() ([]byte, error) { + reader, err := manufacturerReader() + if err != nil { + return nil, err + } + if reader == nil { + return nil, errors.New("got nil reader") + } + manufacturer, err := ioutil.ReadAll(reader) + if err != nil { + return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) + } + return manufacturer, nil +} + +// AuthInfoFromContext extracts the alts.AuthInfo object from the given context, +// if it exists. This API should be used by gRPC server RPC handlers to get +// information about the communicating peer. For client-side, use grpc.Peer() +// CallOption. +func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("no Peer found in Context") + } + return AuthInfoFromPeer(p) +} + +// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it +// exists. This API should be used by gRPC clients after obtaining a peer object +// using the grpc.Peer() CallOption. 
+func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { + altsAuthInfo, ok := p.AuthInfo.(AuthInfo) + if !ok { + return nil, errors.New("no alts.AuthInfo found in Peer") + } + return altsAuthInfo, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go new file mode 100644 index 0000000000..04b349abcf --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -0,0 +1,125 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package google defines credentials for google cloud services. +package google + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/alts" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +const tokenRequestTimeout = 30 * time.Second + +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. +// +// This API is experimental. +func NewDefaultCredentials() credentials.Bundle { + c := &creds{ + newPerRPCCreds: func() credentials.PerRPCCredentials { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + perRPCCreds, err := oauth.NewApplicationDefault(ctx) + if err != nil { + grpclog.Warningf("google default creds: failed to create application oauth: %v", err) + } + return perRPCCreds + }, + } + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + grpclog.Warningf("google default creds: failed to create new creds: %v", err) + } + return bundle +} + +// NewComputeEngineCredentials returns a credentials bundle that is configured to work +// with google services. This API must only be used when running on GCE. Authentication configured +// by this API represents the GCE VM's default service account. +// +// This API is experimental. +func NewComputeEngineCredentials() credentials.Bundle { + c := &creds{ + newPerRPCCreds: func() credentials.PerRPCCredentials { + return oauth.NewComputeEngine() + }, + } + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + grpclog.Warningf("compute engine creds: failed to create new creds: %v", err) + } + return bundle +} + +// creds implements credentials.Bundle. +type creds struct { + // Supported modes are defined in internal/internal.go. + mode string + // The transport credentials associated with this bundle. + transportCreds credentials.TransportCredentials + // The per RPC credentials associated with this bundle. 
+	perRPCCreds credentials.PerRPCCredentials
+	// Creates new per RPC credentials
+	newPerRPCCreds func() credentials.PerRPCCredentials
+}
+
+func (c *creds) TransportCredentials() credentials.TransportCredentials {
+	return c.transportCreds
+}
+
+func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials {
+	if c == nil {
+		return nil
+	}
+	return c.perRPCCreds
+}
+
+// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
+// existing Bundle may cause races.
+func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) {
+	newCreds := &creds{
+		mode:           mode,
+		newPerRPCCreds: c.newPerRPCCreds,
+	}
+
+	// Create transport credentials.
+	switch mode {
+	case internal.CredsBundleModeFallback:
+		newCreds.transportCreds = credentials.NewTLS(nil)
+	case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer:
+		// Only the clients can use google default credentials, so we only need
+		// to create new ALTS client creds here.
+		newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions())
+	default:
+		return nil, fmt.Errorf("unsupported mode: %v", mode)
+	}
+
+	if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer {
+		newCreds.perRPCCreds = newCreds.newPerRPCCreds()
+	}
+
+	return newCreds, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 386d2f0cc0..8b6bdceb38 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -15,28 +15,34 @@ cloud.google.com/go/spanner/internal/backoff
 cloud.google.com/go/storage
 # code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f
 code.cloudfoundry.org/gofileutils/fileutils
-# contrib.go.opencensus.io/exporter/ocagent v0.4.12
-contrib.go.opencensus.io/exporter/ocagent
-# github.com/Azure/azure-sdk-for-go v29.0.0+incompatible
+# github.com/Azure/azure-sdk-for-go v36.2.0+incompatible
 github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute
 github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac
-github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault
+github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault
 github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization
 github.com/Azure/azure-sdk-for-go/storage
 github.com/Azure/azure-sdk-for-go/version
 # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
 github.com/Azure/go-ansiterm
 github.com/Azure/go-ansiterm/winterm
-# github.com/Azure/go-autorest v11.7.1+incompatible
+# github.com/Azure/go-autorest/autorest v0.9.2
 github.com/Azure/go-autorest/autorest
-github.com/Azure/go-autorest/autorest/adal
 github.com/Azure/go-autorest/autorest/azure
+# github.com/Azure/go-autorest/autorest/adal v0.7.0
+github.com/Azure/go-autorest/autorest/adal
+# github.com/Azure/go-autorest/autorest/azure/auth v0.4.0
 github.com/Azure/go-autorest/autorest/azure/auth
+# github.com/Azure/go-autorest/autorest/azure/cli v0.3.0
 github.com/Azure/go-autorest/autorest/azure/cli
+# github.com/Azure/go-autorest/autorest/date v0.2.0
 github.com/Azure/go-autorest/autorest/date
+# github.com/Azure/go-autorest/autorest/to v0.3.0
 github.com/Azure/go-autorest/autorest/to
+# github.com/Azure/go-autorest/autorest/validation v0.2.0
 github.com/Azure/go-autorest/autorest/validation
+# github.com/Azure/go-autorest/logger v0.1.0
 github.com/Azure/go-autorest/logger
+# github.com/Azure/go-autorest/tracing v0.5.0
 github.com/Azure/go-autorest/tracing
 # github.com/BurntSushi/toml v0.3.1
 github.com/BurntSushi/toml
@@ -159,13 +165,6 @@ github.com/briankassouf/jose/jws
 github.com/briankassouf/jose/jwt
 # github.com/cenkalti/backoff v2.2.1+incompatible
 github.com/cenkalti/backoff
-# github.com/census-instrumentation/opencensus-proto v0.2.0
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1
 # github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f
 github.com/centrify/cloud-golang-sdk/oauth
 github.com/centrify/cloud-golang-sdk/restapi
@@ -254,12 +253,8 @@ github.com/gogo/protobuf/proto
 github.com/gogo/protobuf/protoc-gen-gogo/descriptor
 github.com/gogo/protobuf/sortkeys
 # github.com/golang/protobuf v1.3.2
-github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/protoc-gen-go/descriptor
-github.com/golang/protobuf/protoc-gen-go/generator
-github.com/golang/protobuf/protoc-gen-go/generator/internal/remap
-github.com/golang/protobuf/protoc-gen-go/plugin
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
@@ -279,14 +274,10 @@ github.com/google/go-querystring/query
 github.com/google/gofuzz
 # github.com/google/uuid v1.0.0
 github.com/google/uuid
-# github.com/googleapis/gax-go/v2 v2.0.4
+# github.com/googleapis/gax-go/v2 v2.0.5
 github.com/googleapis/gax-go/v2
 # github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75
 github.com/gorhill/cronexpr
-# github.com/grpc-ecosystem/grpc-gateway v1.8.5
-github.com/grpc-ecosystem/grpc-gateway/internal
-github.com/grpc-ecosystem/grpc-gateway/runtime
-github.com/grpc-ecosystem/grpc-gateway/utilities
 # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed
 github.com/hailocab/go-hostpool
 # github.com/hashicorp/consul-template v0.22.0
@@ -312,6 +303,17 @@ github.com/hashicorp/go-gcp-common/gcputil
 github.com/hashicorp/go-hclog
 # github.com/hashicorp/go-immutable-radix v1.1.0
 github.com/hashicorp/go-immutable-radix
+# github.com/hashicorp/go-kms-wrapping v0.0.0-20191229213738-edc2c6e9ee1d
+github.com/hashicorp/go-kms-wrapping
+github.com/hashicorp/go-kms-wrapping/entropy
+github.com/hashicorp/go-kms-wrapping/internal/xor
+github.com/hashicorp/go-kms-wrapping/wrappers/aead
+github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms
+github.com/hashicorp/go-kms-wrapping/wrappers/awskms
+github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault
+github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms
+github.com/hashicorp/go-kms-wrapping/wrappers/ocikms
+github.com/hashicorp/go-kms-wrapping/wrappers/transit
 # github.com/hashicorp/go-memdb v1.0.2
 github.com/hashicorp/go-memdb
 # github.com/hashicorp/go-msgpack v0.5.5
@@ -398,7 +400,7 @@ github.com/hashicorp/serf/coordinate
 # github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec
 github.com/hashicorp/vault-plugin-auth-alicloud
 github.com/hashicorp/vault-plugin-auth-alicloud/tools
-# github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190814210035-08e00d801115
+# github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20191119151105-86f21fbc96e3
 github.com/hashicorp/vault-plugin-auth-azure
 # github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190814210042-090ec2ed93ce
 github.com/hashicorp/vault-plugin-auth-centrify
@@ -429,9 +431,9 @@ github.com/hashicorp/vault-plugin-secrets-ad/plugin/util
 # github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190814210129-4d18bec92f56
 github.com/hashicorp/vault-plugin-secrets-alicloud
 github.com/hashicorp/vault-plugin-secrets-alicloud/clients
-# github.com/hashicorp/vault-plugin-secrets-azure v0.5.2
+# github.com/hashicorp/vault-plugin-secrets-azure v0.5.3-0.20191119150734-45c076c82f1d
 github.com/hashicorp/vault-plugin-secrets-azure
-# github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191112195538-3c798536d157
+# github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191119222840-524d5b57ed36
 github.com/hashicorp/vault-plugin-secrets-gcp/plugin
 github.com/hashicorp/vault-plugin-secrets-gcp/plugin/iamutil
 github.com/hashicorp/vault-plugin-secrets-gcp/plugin/util
@@ -441,12 +443,13 @@ github.com/hashicorp/vault-plugin-secrets-gcpkms
 github.com/hashicorp/vault-plugin-secrets-kv
 # github.com/hashicorp/vault/api v1.0.5-0.20191218213558-0bc25f908162 => ./api
 github.com/hashicorp/vault/api
-# github.com/hashicorp/vault/sdk v0.1.14-0.20191218213202-9caafff72a1f => ./sdk
+# github.com/hashicorp/vault/sdk v0.1.14-0.20191229212425-c478d00be0d6 => ./sdk
 github.com/hashicorp/vault/sdk/database/dbplugin
 github.com/hashicorp/vault/sdk/database/helper/connutil
 github.com/hashicorp/vault/sdk/database/helper/credsutil
 github.com/hashicorp/vault/sdk/database/helper/dbutil
 github.com/hashicorp/vault/sdk/framework
+github.com/hashicorp/vault/sdk/helper/awsutil
 github.com/hashicorp/vault/sdk/helper/base62
 github.com/hashicorp/vault/sdk/helper/certutil
 github.com/hashicorp/vault/sdk/helper/cidrutil
@@ -454,7 +457,6 @@ github.com/hashicorp/vault/sdk/helper/compressutil
 github.com/hashicorp/vault/sdk/helper/consts
 github.com/hashicorp/vault/sdk/helper/cryptoutil
 github.com/hashicorp/vault/sdk/helper/dbtxn
-github.com/hashicorp/vault/sdk/helper/entropy
 github.com/hashicorp/vault/sdk/helper/errutil
 github.com/hashicorp/vault/sdk/helper/hclutil
 github.com/hashicorp/vault/sdk/helper/identitytpl
@@ -487,7 +489,7 @@ github.com/hashicorp/vault/sdk/plugin/mock
 github.com/hashicorp/vault/sdk/plugin/pb
 github.com/hashicorp/vault/sdk/queue
 github.com/hashicorp/vault/sdk/version
-# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
+# github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
 github.com/hashicorp/yamux
 # github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4
 github.com/influxdata/influxdb/client/v2
@@ -588,7 +590,7 @@ github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
 # github.com/opencontainers/runc v0.1.1
 github.com/opencontainers/runc/libcontainer/user
-# github.com/oracle/oci-go-sdk v7.0.0+incompatible
+# github.com/oracle/oci-go-sdk v12.5.0+incompatible
 github.com/oracle/oci-go-sdk/common
 github.com/oracle/oci-go-sdk/common/auth
 github.com/oracle/oci-go-sdk/keymanagement
@@ -721,7 +723,6 @@ go.opencensus.io/metric/metricproducer
 go.opencensus.io/plugin/ocgrpc
 go.opencensus.io/plugin/ochttp
 go.opencensus.io/plugin/ochttp/propagation/b3
-go.opencensus.io/plugin/ochttp/propagation/tracecontext
 go.opencensus.io/resource
 go.opencensus.io/stats
 go.opencensus.io/stats/internal
@@ -773,15 +774,13 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
+# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 golang.org/x/oauth2
 golang.org/x/oauth2/clientcredentials
 golang.org/x/oauth2/google
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.0.0-20190423024810-112230192c58
-golang.org/x/sync/semaphore
 # golang.org/x/sys v0.0.0-20191008105621-543471e840be
 golang.org/x/sys/cpu
 golang.org/x/sys/unix
@@ -799,20 +798,19 @@ golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 golang.org/x/time/rate
-# google.golang.org/api v0.5.0
+# google.golang.org/api v0.14.0
 google.golang.org/api/cloudresourcemanager/v1
 google.golang.org/api/compute/v1
-google.golang.org/api/gensupport
 google.golang.org/api/googleapi
-google.golang.org/api/googleapi/internal/uritemplates
 google.golang.org/api/googleapi/transport
 google.golang.org/api/iam/v1
 google.golang.org/api/internal
+google.golang.org/api/internal/gensupport
+google.golang.org/api/internal/third_party/uritemplates
 google.golang.org/api/iterator
 google.golang.org/api/oauth2/v2
 google.golang.org/api/option
 google.golang.org/api/storage/v1
-google.golang.org/api/support/bundler
 google.golang.org/api/transport
 google.golang.org/api/transport/grpc
 google.golang.org/api/transport/http
@@ -835,7 +833,6 @@ google.golang.org/appengine/urlfetch
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/distribution
-google.golang.org/genproto/googleapis/api/httpbody
 google.golang.org/genproto/googleapis/api/label
 google.golang.org/genproto/googleapis/api/metric
 google.golang.org/genproto/googleapis/api/monitoredres
@@ -852,11 +849,21 @@ google.golang.org/genproto/protobuf/field_mask
 google.golang.org/grpc
 google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/grpclb
+google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
+google.golang.org/grpc/credentials/alts
+google.golang.org/grpc/credentials/alts/internal
+google.golang.org/grpc/credentials/alts/internal/authinfo
+google.golang.org/grpc/credentials/alts/internal/conn
+google.golang.org/grpc/credentials/alts/internal/handshaker
+google.golang.org/grpc/credentials/alts/internal/handshaker/service
+google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp
+google.golang.org/grpc/credentials/google
 google.golang.org/grpc/credentials/internal
 google.golang.org/grpc/credentials/oauth
 google.golang.org/grpc/encoding