From 79c47b9433714d78d8d12fb7d27da97d5cac2ce7 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 18 Jul 2017 10:15:54 -0400 Subject: [PATCH] Bump deps --- .../go/internal/version/version.go | 2 +- vendor/cloud.google.com/go/storage/acl.go | 95 +- vendor/cloud.google.com/go/storage/bucket.go | 52 +- vendor/cloud.google.com/go/storage/copy.go | 21 +- vendor/cloud.google.com/go/storage/storage.go | 16 +- vendor/cloud.google.com/go/storage/writer.go | 3 + .../Azure/azure-sdk-for-go/storage/blob.go | 31 +- .../azure-sdk-for-go/storage/blockblob.go | 19 +- vendor/github.com/Azure/go-ansiterm/parser.go | 2 +- .../go-ansiterm/winterm/win_event_handler.go | 2 +- .../DataDog/datadog-go/statsd/statsd.go | 20 +- vendor/github.com/SAP/go-hdb/NOTICE | 0 .../github.com/Sirupsen/logrus/CHANGELOG.md | 9 + vendor/github.com/Sirupsen/logrus/README.md | 31 +- vendor/github.com/Sirupsen/logrus/entry.go | 1 + vendor/github.com/Sirupsen/logrus/exported.go | 2 +- vendor/github.com/Sirupsen/logrus/logger.go | 2 +- .../Sirupsen/logrus/text_formatter.go | 36 +- .../aws-sdk-go/aws/client/default_retryer.go | 4 +- .../github.com/aws/aws-sdk-go/aws/config.go | 2 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 79 +- .../github.com/aws/aws-sdk-go/aws/logger.go | 4 +- .../aws/aws-sdk-go/aws/request/retryer.go | 2 +- .../aws/aws-sdk-go/aws/signer/v4/v4.go | 30 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/ec2/api.go | 665 +++++++- .../aws/aws-sdk-go/service/s3/api.go | 9 + .../github.com/boombuler/barcode/barcode.go | 15 + .../github.com/boombuler/barcode/qr/qrcode.go | 2 +- vendor/github.com/cenk/backoff/exponential.go | 7 +- vendor/github.com/cenk/backoff/tries.go | 35 + .../github.com/coreos/etcd/clientv3/client.go | 11 + .../coreos/etcd/clientv3/compare.go | 12 + .../coreos/etcd/clientv3/concurrency/stm.go | 4 +- .../github.com/coreos/etcd/clientv3/config.go | 8 + vendor/github.com/coreos/etcd/clientv3/kv.go | 9 +- .../coreos/etcd/clientv3/maintenance.go | 10 + vendor/github.com/coreos/etcd/clientv3/op.go | 44 + .../etcdserver/api/v3rpc/rpctypes/error.go | 3 + .../etcdserver/etcdserverpb/etcdserver.pb.go | 2 + .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 1215 ++++++++++---- .../etcd/etcdserver/etcdserverpb/rpc.proto | 29 +- .../coreos/etcd/pkg/transport/listener.go | 5 +- .../coreos/etcd/pkg/transport/listener_tls.go | 87 +- .../github.com/coreos/etcd/version/version.go | 2 +- .../denisenkom/go-mssqldb/README.md | 4 + .../denisenkom/go-mssqldb/appveyor.yml | 4 +- .../github.com/denisenkom/go-mssqldb/types.go | 4 + .../docker/docker/api/types/client.go | 7 +- .../docker/docker/api/types/configs.go | 1 + .../docker/api/types/swarm/runtime/gen.go | 3 + .../api/types/swarm/runtime/plugin.pb.go | 712 ++++++++ .../api/types/swarm/runtime/plugin.proto | 18 + .../docker/docker/api/types/swarm/secret.go | 3 +- .../docker/docker/api/types/swarm/swarm.go | 2 +- .../docker/docker/api/types/swarm/task.go | 12 +- .../docker/docker/api/types/types.go | 22 +- vendor/github.com/docker/docker/opts/ip.go | 2 +- .../docker/docker/pkg/archive/archive.go | 75 +- .../docker/pkg/archive/archive_linux.go | 6 +- .../docker/docker/pkg/archive/archive_unix.go | 31 +- .../docker/pkg/archive/changes_linux.go | 7 +- .../docker/docker/pkg/archive/changes_unix.go | 5 +- .../docker/docker/pkg/archive/copy.go | 3 + .../docker/docker/pkg/archive/diff.go | 2 +- .../homedir/{homedir.go => homedir_unix.go} | 11 +- .../docker/pkg/homedir/homedir_windows.go | 24 + .../docker/docker/pkg/idtools/idtools_unix.go | 2 +- 
.../docker/pkg/idtools/idtools_windows.go | 2 +- .../docker/docker/pkg/mount/flags_linux.go | 48 +- .../docker/pkg/mount/mounter_freebsd.go | 5 +- .../docker/docker/pkg/mount/mounter_linux.go | 23 +- .../docker/docker/pkg/system/chtimes.go | 17 - .../docker/pkg/system/chtimes_windows.go | 19 +- .../docker/pkg/system/events_windows.go | 24 +- .../docker/docker/pkg/system/filesys.go | 9 +- .../docker/pkg/system/filesys_windows.go | 107 +- .../docker/docker/pkg/system/init.go | 22 + .../docker/docker/pkg/system/init_windows.go | 17 + .../docker/docker/pkg/system/lcow_unix.go | 8 + .../docker/docker/pkg/system/lcow_windows.go | 6 + .../docker/docker/pkg/system/lstat_unix.go | 4 +- .../docker/docker/pkg/system/mknod.go | 4 +- .../docker/docker/pkg/system/path.go | 21 + .../docker/docker/pkg/system/path_unix.go | 5 - .../docker/docker/pkg/system/path_windows.go | 6 +- .../docker/docker/pkg/system/process_unix.go | 8 +- .../github.com/docker/docker/pkg/system/rm.go | 2 +- .../docker/docker/pkg/system/stat_unix.go | 4 +- .../docker/docker/pkg/system/syscall_unix.go | 4 +- .../docker/pkg/system/syscall_windows.go | 14 +- .../docker/docker/pkg/system/umask.go | 4 +- .../docker/pkg/system/utimes_freebsd.go | 8 +- .../docker/docker/pkg/system/utimes_linux.go | 13 +- .../docker/docker/pkg/system/xattrs_linux.go | 48 +- .../docker/docker/pkg/term/term_windows.go | 12 +- .../github.com/fsouza/go-dockerclient/AUTHORS | 3 + .../fsouza/go-dockerclient/appveyor.yml | 3 +- .../fsouza/go-dockerclient/container.go | 2 +- .../github.com/fsouza/go-dockerclient/exec.go | 5 + vendor/github.com/go-ini/ini/ini.go | 2 +- vendor/github.com/go-ini/ini/struct.go | 5 +- vendor/github.com/go-ldap/ldap/dn.go | 3 + vendor/github.com/go-ldap/ldap/filter.go | 5 +- vendor/github.com/go-sql-driver/mysql/AUTHORS | 1 + .../github.com/go-sql-driver/mysql/driver.go | 10 +- vendor/github.com/go-sql-driver/mysql/dsn.go | 10 +- vendor/github.com/gocql/gocql/AUTHORS | 2 + vendor/github.com/gocql/gocql/conn.go | 10 +- .../github.com/gocql/gocql/connectionpool.go | 14 +- vendor/github.com/gocql/gocql/control.go | 60 +- vendor/github.com/gocql/gocql/events.go | 33 +- vendor/github.com/gocql/gocql/filters.go | 4 +- vendor/github.com/gocql/gocql/host_source.go | 474 ++++-- vendor/github.com/gocql/gocql/marshal.go | 104 +- vendor/github.com/gocql/gocql/policies.go | 16 +- vendor/github.com/gocql/gocql/ring.go | 13 +- vendor/github.com/gocql/gocql/session.go | 28 +- vendor/github.com/gocql/gocql/token.go | 2 +- vendor/github.com/gogo/protobuf/LICENSE | 36 + .../github.com/gogo/protobuf/proto/Makefile | 43 + .../github.com/gogo/protobuf/proto/clone.go | 234 +++ .../github.com/gogo/protobuf/proto/decode.go | 978 +++++++++++ .../gogo/protobuf/proto/decode_gogo.go | 172 ++ .../gogo/protobuf/proto/duration.go | 100 ++ .../gogo/protobuf/proto/duration_gogo.go | 203 +++ .../github.com/gogo/protobuf/proto/encode.go | 1362 +++++++++++++++ .../gogo/protobuf/proto/encode_gogo.go | 350 ++++ .../github.com/gogo/protobuf/proto/equal.go | 300 ++++ .../gogo/protobuf/proto/extensions.go | 693 ++++++++ .../gogo/protobuf/proto/extensions_gogo.go | 294 ++++ vendor/github.com/gogo/protobuf/proto/lib.go | 898 ++++++++++ .../gogo/protobuf/proto/lib_gogo.go | 42 + .../gogo/protobuf/proto/message_set.go | 311 ++++ .../gogo/protobuf/proto/pointer_reflect.go | 484 ++++++ .../protobuf/proto/pointer_reflect_gogo.go | 85 + .../gogo/protobuf/proto/pointer_unsafe.go | 270 +++ .../protobuf/proto/pointer_unsafe_gogo.go | 128 ++ .../gogo/protobuf/proto/properties.go | 968 
+++++++++++ .../gogo/protobuf/proto/properties_gogo.go | 111 ++ .../gogo/protobuf/proto/skip_gogo.go | 119 ++ vendor/github.com/gogo/protobuf/proto/text.go | 928 +++++++++++ .../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1013 ++++++++++++ .../gogo/protobuf/proto/timestamp.go | 113 ++ .../gogo/protobuf/proto/timestamp_gogo.go | 229 +++ .../golang/protobuf/proto/encode.go | 4 +- .../golang/protobuf/proto/text_parser.go | 2 +- .../go-github/github/github-accessors.go | 24 + .../google/go-github/github/github.go | 30 +- .../google/go-github/github/orgs.go | 19 + .../google/go-github/github/projects.go | 7 +- .../google/go-github/github/repos.go | 243 ++- .../go-github/github/repos_collaborators.go | 3 +- .../google/go-github/github/users_gpg_keys.go | 2 +- .../github.com/hashicorp/consul/api/agent.go | 2 +- vendor/github.com/hashicorp/consul/api/api.go | 50 +- .../hashicorp/consul/api/session.go | 9 +- .../github.com/hashicorp/go-msgpack/LICENSE | 25 - .../hashicorp/go-msgpack/codec/0doc.go | 143 -- .../hashicorp/go-msgpack/codec/README.md | 174 -- .../hashicorp/go-msgpack/codec/binc.go | 786 --------- .../hashicorp/go-msgpack/codec/decode.go | 1048 ------------ .../hashicorp/go-msgpack/codec/encode.go | 1001 ------------ .../hashicorp/go-msgpack/codec/helper.go | 589 ------- .../go-msgpack/codec/helper_internal.go | 127 -- .../hashicorp/go-msgpack/codec/msgpack.go | 816 --------- .../go-msgpack/codec/msgpack_test.py | 110 -- .../hashicorp/go-msgpack/codec/rpc.go | 152 -- .../hashicorp/go-msgpack/codec/simple.go | 461 ------ .../hashicorp/go-msgpack/codec/time.go | 193 --- .../hashicorp/go-multierror/multierror.go | 4 +- .../github.com/hashicorp/go-plugin/README.md | 29 +- .../github.com/hashicorp/go-plugin/client.go | 135 +- .../hashicorp/go-plugin/grpc_client.go | 83 + .../hashicorp/go-plugin/grpc_server.go | 115 ++ .../github.com/hashicorp/go-plugin/plugin.go | 31 + .../hashicorp/go-plugin/protocol.go | 45 + .../hashicorp/go-plugin/rpc_client.go | 37 + .../hashicorp/go-plugin/rpc_server.go | 12 +- .../github.com/hashicorp/go-plugin/server.go | 102 +- .../github.com/hashicorp/go-plugin/testing.go | 44 + .../hashicorp/net-rpc-msgpackrpc/README.md | 9 - .../hashicorp/net-rpc-msgpackrpc/client.go | 43 - .../hashicorp/net-rpc-msgpackrpc/codec.go | 122 -- .../net-rpc-msgpackrpc/msgpackrpc.go | 42 - .../github.com/hashicorp/scada-client/LICENSE | 363 ---- .../hashicorp/scada-client/README.md | 23 - .../hashicorp/scada-client/client.go | 146 -- .../hashicorp/scada-client/provider.go | 473 ------ .../hashicorp/scada-client/scada/scada.go | 231 --- .../hashicorp/scada-client/structs.go | 49 - .../go-crypto/openpgp/packet/signature.go | 26 +- vendor/github.com/lib/pq/conn.go | 7 +- .../mattn/go-colorable/colorable_windows.go | 26 +- .../michaelklishin/rabbit-hole/README.md | 4 + .../michaelklishin/rabbit-hole/client.go | 32 +- .../michaelklishin/rabbit-hole/users.go | 19 + vendor/github.com/mitchellh/cli/README.md | 3 + .../github.com/mitchellh/cli/autocomplete.go | 43 + vendor/github.com/mitchellh/cli/cli.go | 204 ++- vendor/github.com/mitchellh/cli/command.go | 20 + .../github.com/mitchellh/cli/command_mock.go | 21 + .../image-spec/specs-go/v1/config.go | 2 +- .../image-spec/specs-go/version.go | 2 +- .../runc/libcontainer/system/linux.go | 11 +- .../runc/libcontainer/system/proc.go | 120 +- .../runc/libcontainer/user/user.go | 4 +- .../github.com/posener/complete/LICENSE.txt | 21 + vendor/github.com/posener/complete/args.go | 75 + 
vendor/github.com/posener/complete/cmd/cmd.go | 128 ++ .../posener/complete/cmd/install/bash.go | 32 + .../posener/complete/cmd/install/install.go | 92 ++ .../posener/complete/cmd/install/utils.go | 118 ++ .../posener/complete/cmd/install/zsh.go | 39 + vendor/github.com/posener/complete/command.go | 106 ++ .../github.com/posener/complete/complete.go | 86 + vendor/github.com/posener/complete/log.go | 23 + .../github.com/posener/complete/match/file.go | 19 + .../posener/complete/match/match.go | 6 + .../posener/complete/match/prefix.go | 9 + .../posener/complete/metalinter.json | 21 + vendor/github.com/posener/complete/predict.go | 41 + .../posener/complete/predict_files.go | 108 ++ .../posener/complete/predict_set.go | 19 + vendor/github.com/posener/complete/readme.md | 116 ++ vendor/github.com/posener/complete/test.sh | 12 + vendor/github.com/posener/complete/utils.go | 46 + .../github.com/ryanuber/columnize/README.md | 2 + .../ryanuber/columnize/columnize.go | 25 +- vendor/github.com/sethgrid/pester/main.go | 4 +- .../github.com/sirupsen/logrus/CHANGELOG.md | 109 ++ .../LICENSE.md => sirupsen/logrus/LICENSE} | 2 +- vendor/github.com/sirupsen/logrus/README.md | 504 ++++++ vendor/github.com/sirupsen/logrus/alt_exit.go | 64 + vendor/github.com/sirupsen/logrus/doc.go | 26 + vendor/github.com/sirupsen/logrus/entry.go | 276 ++++ vendor/github.com/sirupsen/logrus/exported.go | 193 +++ .../github.com/sirupsen/logrus/formatter.go | 45 + vendor/github.com/sirupsen/logrus/hooks.go | 34 + .../sirupsen/logrus/json_formatter.go | 74 + vendor/github.com/sirupsen/logrus/logger.go | 317 ++++ vendor/github.com/sirupsen/logrus/logrus.go | 143 ++ .../sirupsen/logrus/terminal_appengine.go | 10 + .../sirupsen/logrus/terminal_bsd.go | 10 + .../sirupsen/logrus/terminal_linux.go | 14 + .../sirupsen/logrus/terminal_notwindows.go | 28 + .../sirupsen/logrus/terminal_solaris.go | 21 + .../sirupsen/logrus/terminal_windows.go | 82 + .../sirupsen/logrus/text_formatter.go | 175 ++ vendor/github.com/sirupsen/logrus/writer.go | 62 + vendor/github.com/ugorji/go/codec/binc.go | 9 + vendor/github.com/ugorji/go/codec/cbor.go | 9 + vendor/github.com/ugorji/go/codec/decode.go | 29 +- vendor/github.com/ugorji/go/codec/encode.go | 5 +- vendor/github.com/ugorji/go/codec/gen.go | 24 +- .../ugorji/go/codec/helper_not_unsafe.go | 16 + .../ugorji/go/codec/helper_unsafe.go | 16 +- vendor/github.com/ugorji/go/codec/msgpack.go | 9 + vendor/github.com/ugorji/go/codec/prebuild.sh | 26 +- vendor/github.com/ugorji/go/codec/simple.go | 9 + vendor/golang.org/x/crypto/blowfish/cipher.go | 2 +- vendor/golang.org/x/crypto/blowfish/const.go | 2 +- .../x/crypto/curve25519/const_amd64.h | 2 +- .../x/crypto/curve25519/const_amd64.s | 2 +- vendor/golang.org/x/crypto/curve25519/doc.go | 2 +- .../x/crypto/curve25519/freeze_amd64.s | 2 +- .../x/crypto/curve25519/ladderstep_amd64.s | 2 +- .../x/crypto/curve25519/mul_amd64.s | 2 +- .../x/crypto/curve25519/square_amd64.s | 2 +- vendor/golang.org/x/crypto/ed25519/ed25519.go | 2 +- vendor/golang.org/x/crypto/ssh/cipher.go | 4 +- vendor/golang.org/x/crypto/ssh/kex.go | 8 +- vendor/golang.org/x/crypto/ssh/keys.go | 6 + vendor/golang.org/x/crypto/ssh/server.go | 71 +- vendor/golang.org/x/net/context/context.go | 102 -- vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go19.go | 109 ++ vendor/golang.org/x/net/http2/go18.go | 2 + vendor/golang.org/x/net/http2/not_go18.go | 2 + vendor/golang.org/x/net/http2/server.go | 19 +- vendor/golang.org/x/net/http2/transport.go | 27 +- 
vendor/golang.org/x/net/idna/idna.go | 64 +- vendor/golang.org/x/net/trace/events.go | 6 +- vendor/golang.org/x/net/trace/trace.go | 58 +- vendor/golang.org/x/oauth2/internal/token.go | 4 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 9 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 31 +- .../x/sys/unix/zerrors_linux_386.go | 40 + .../x/sys/unix/zerrors_linux_amd64.go | 40 + .../x/sys/unix/zerrors_linux_arm.go | 40 + .../x/sys/unix/zerrors_linux_arm64.go | 40 + .../x/sys/unix/zerrors_linux_mips.go | 40 + .../x/sys/unix/zerrors_linux_mips64.go | 40 + .../x/sys/unix/zerrors_linux_mips64le.go | 40 + .../x/sys/unix/zerrors_linux_mipsle.go | 40 + .../x/sys/unix/zerrors_linux_ppc64.go | 40 + .../x/sys/unix/zerrors_linux_ppc64le.go | 40 + .../x/sys/unix/zerrors_linux_s390x.go | 40 + .../x/sys/unix/zsyscall_linux_386.go | 30 +- .../x/sys/unix/zsyscall_linux_amd64.go | 30 +- .../x/sys/unix/zsyscall_linux_arm.go | 30 +- .../x/sys/unix/zsyscall_linux_arm64.go | 30 +- .../x/sys/unix/zsyscall_linux_mips.go | 30 +- .../x/sys/unix/zsyscall_linux_mips64.go | 30 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 30 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 30 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 30 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 30 +- .../x/sys/unix/zsyscall_linux_s390x.go | 30 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 2 + .../x/sys/unix/ztypes_linux_amd64.go | 2 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2 + .../x/sys/unix/ztypes_linux_arm64.go | 2 + .../x/sys/unix/ztypes_linux_mips.go | 2 + .../x/sys/unix/ztypes_linux_mips64.go | 2 + .../x/sys/unix/ztypes_linux_mips64le.go | 2 + .../x/sys/unix/ztypes_linux_mipsle.go | 2 + .../x/sys/unix/ztypes_linux_ppc64.go | 2 + .../x/sys/unix/ztypes_linux_ppc64le.go | 2 + .../x/sys/unix/ztypes_linux_s390x.go | 2 + .../golang.org/x/sys/windows/dll_windows.go | 1 - .../x/sys/windows/memory_windows.go | 26 + .../x/sys/windows/syscall_windows.go | 3 + .../x/sys/windows/zsyscall_windows.go | 40 + .../x/sys/windows/ztypes_windows.go | 7 - vendor/golang.org/x/text/AUTHORS | 3 - vendor/golang.org/x/text/CONTRIBUTING.md | 31 - vendor/golang.org/x/text/CONTRIBUTORS | 3 - vendor/golang.org/x/text/README | 23 - vendor/golang.org/x/text/codereview.cfg | 1 - vendor/golang.org/x/text/doc.go | 13 - vendor/golang.org/x/text/gen.go | 292 ---- vendor/golang.org/x/text/internal/gen/code.go | 351 ---- vendor/golang.org/x/text/internal/gen/gen.go | 281 ---- vendor/golang.org/x/text/unicode/cldr/base.go | 100 -- vendor/golang.org/x/text/unicode/cldr/cldr.go | 130 -- .../golang.org/x/text/unicode/cldr/collate.go | 359 ---- .../golang.org/x/text/unicode/cldr/decode.go | 171 -- .../golang.org/x/text/unicode/cldr/makexml.go | 400 ----- .../golang.org/x/text/unicode/cldr/resolve.go | 602 ------- .../golang.org/x/text/unicode/cldr/slice.go | 144 -- vendor/golang.org/x/text/unicode/cldr/xml.go | 1456 ----------------- .../api/storage/v1/storage-api.json | 116 +- .../api/storage/v1/storage-gen.go | 337 ++-- vendor/google.golang.org/grpc/LICENSE | 20 +- vendor/google.golang.org/grpc/Makefile | 10 +- vendor/google.golang.org/grpc/README.md | 2 +- vendor/google.golang.org/grpc/balancer.go | 12 +- vendor/google.golang.org/grpc/clientconn.go | 9 +- vendor/google.golang.org/grpc/doc.go | 2 +- vendor/google.golang.org/grpc/go16.go | 3 +- vendor/google.golang.org/grpc/go17.go | 3 +- vendor/google.golang.org/grpc/grpclb.go | 32 +- .../google.golang.org/grpc/grpclog/grpclog.go | 135 ++ .../google.golang.org/grpc/grpclog/logger.go | 69 +- .../grpc/grpclog/loggerv2.go | 
204 +++ .../grpc/health/grpc_health_v1/health.pb.go | 176 ++ .../grpc/health/grpc_health_v1/health.proto | 34 + .../google.golang.org/grpc/health/health.go | 70 + .../grpc/metadata/metadata.go | 2 +- vendor/google.golang.org/grpc/rpc_util.go | 8 +- vendor/google.golang.org/grpc/server.go | 31 +- vendor/google.golang.org/grpc/stream.go | 16 +- .../grpc/transport/bdp_estimator.go | 125 ++ .../grpc/transport/control.go | 16 +- .../grpc/transport/handler_server.go | 4 +- .../grpc/transport/http2_client.go | 89 +- .../grpc/transport/http2_server.go | 110 +- .../grpc/transport/http_util.go | 3 +- .../google.golang.org/grpc/transport/log.go | 50 + .../grpc/transport/transport.go | 107 +- .../ory-am/dockertest.v3/dockertest.go | 33 +- vendor/gopkg.in/yaml.v2/decode.go | 7 +- vendor/gopkg.in/yaml.v2/emitterc.go | 8 +- vendor/gopkg.in/yaml.v2/yaml.go | 13 +- vendor/gopkg.in/yaml.v2/yamlh.go | 2 +- vendor/vendor.json | 1078 ++++++------ 378 files changed, 22305 insertions(+), 14008 deletions(-) mode change 100644 => 100755 vendor/github.com/SAP/go-hdb/NOTICE create mode 100644 vendor/github.com/cenk/backoff/tries.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto rename vendor/github.com/docker/docker/pkg/homedir/{homedir.go => homedir_unix.go} (77%) create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path.go create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create 
mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/0doc.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/README.md delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/binc.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/decode.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/encode.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/helper.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go delete mode 100755 vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/rpc.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/simple.go delete mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/time.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/protocol.go delete mode 100644 vendor/github.com/hashicorp/net-rpc-msgpackrpc/README.md delete mode 100644 vendor/github.com/hashicorp/net-rpc-msgpackrpc/client.go delete mode 100644 vendor/github.com/hashicorp/net-rpc-msgpackrpc/codec.go delete mode 100644 vendor/github.com/hashicorp/net-rpc-msgpackrpc/msgpackrpc.go delete mode 100644 vendor/github.com/hashicorp/scada-client/LICENSE delete mode 100644 vendor/github.com/hashicorp/scada-client/README.md delete mode 100644 vendor/github.com/hashicorp/scada-client/client.go delete mode 100644 vendor/github.com/hashicorp/scada-client/provider.go delete mode 100644 vendor/github.com/hashicorp/scada-client/scada/scada.go delete mode 100644 vendor/github.com/hashicorp/scada-client/structs.go create mode 100644 vendor/github.com/mitchellh/cli/autocomplete.go create mode 100644 vendor/github.com/posener/complete/LICENSE.txt create mode 100644 vendor/github.com/posener/complete/args.go create mode 100644 vendor/github.com/posener/complete/cmd/cmd.go create mode 100644 vendor/github.com/posener/complete/cmd/install/bash.go create mode 100644 vendor/github.com/posener/complete/cmd/install/install.go create mode 100644 vendor/github.com/posener/complete/cmd/install/utils.go create mode 100644 vendor/github.com/posener/complete/cmd/install/zsh.go create mode 100644 vendor/github.com/posener/complete/command.go create mode 100644 vendor/github.com/posener/complete/complete.go create mode 100644 vendor/github.com/posener/complete/log.go create mode 100644 vendor/github.com/posener/complete/match/file.go create mode 100644 vendor/github.com/posener/complete/match/match.go create mode 100644 vendor/github.com/posener/complete/match/prefix.go create mode 100644 vendor/github.com/posener/complete/metalinter.json create mode 100644 vendor/github.com/posener/complete/predict.go create mode 100644 vendor/github.com/posener/complete/predict_files.go create mode 100644 vendor/github.com/posener/complete/predict_set.go create mode 100644 vendor/github.com/posener/complete/readme.md create mode 100755 vendor/github.com/posener/complete/test.sh create 
mode 100644 vendor/github.com/posener/complete/utils.go create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md rename vendor/github.com/{hashicorp/net-rpc-msgpackrpc/LICENSE.md => sirupsen/logrus/LICENSE} (96%) create mode 100644 vendor/github.com/sirupsen/logrus/README.md create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/sirupsen/logrus/doc.go create mode 100644 vendor/github.com/sirupsen/logrus/entry.go create mode 100644 vendor/github.com/sirupsen/logrus/exported.go create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/logger.go create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_appengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/writer.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100644 vendor/golang.org/x/text/AUTHORS delete mode 100644 vendor/golang.org/x/text/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/text/README delete mode 100644 vendor/golang.org/x/text/codereview.cfg delete mode 100644 vendor/golang.org/x/text/doc.go delete mode 100644 vendor/golang.org/x/text/gen.go delete mode 100644 vendor/golang.org/x/text/internal/gen/code.go delete mode 100644 vendor/golang.org/x/text/internal/gen/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go delete mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto create mode 100644 vendor/google.golang.org/grpc/health/health.go create mode 100644 vendor/google.golang.org/grpc/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/transport/log.go diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index 33f1cdb600..5eb06bac55 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // 
repo. It should be a date in YYYYMMDD format. -const Repo = "20170404" +const Repo = "20170621" // Go returns the Go runtime version. The returned string // has no whitespace. diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 041e541323..a1b2b6d3dd 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -16,8 +16,11 @@ package storage import ( "fmt" + "net/http" + "reflect" "golang.org/x/net/context" + "google.golang.org/api/googleapi" raw "google.golang.org/api/storage/v1" ) @@ -53,10 +56,11 @@ type ACLRule struct { // ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. type ACLHandle struct { - c *Client - bucket string - object string - isDefault bool + c *Client + bucket string + object string + isDefault bool + userProject string // for requester-pays buckets } // Delete permanently deletes the ACL entry for the given entity. @@ -73,10 +77,10 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error { // Set sets the permission level for the given entity. func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error { if a.object != "" { - return a.objectSet(ctx, entity, role) + return a.objectSet(ctx, entity, role, false) } if a.isDefault { - return a.bucketDefaultSet(ctx, entity, role) + return a.objectSet(ctx, entity, role, true) } return a.bucketSet(ctx, entity, role) } @@ -96,8 +100,8 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { var acls *raw.ObjectAccessControls var err error err = runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx) - setClientHeader(req.Header()) + req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) + a.configureCall(req, ctx) acls, err = req.Do() return err }) @@ -107,28 +111,10 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { return toACLRules(acls.Items), nil } -func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error { - acl := &raw.ObjectAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - err := runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx) - setClientHeader(req.Header()) - _, err := req.Do() - return err - }) - if err != nil { - return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) - } - return nil -} - func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { err := runWithRetry(ctx, func() error { - req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx) - setClientHeader(req.Header()) + req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(req, ctx) return req.Do() }) if err != nil { @@ -141,8 +127,8 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { var acls *raw.BucketAccessControls var err error err = runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx) - setClientHeader(req.Header()) + req := a.c.raw.BucketAccessControls.List(a.bucket) + a.configureCall(req, ctx) acls, err = req.Do() return err }) @@ -164,8 +150,8 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol Role: string(role), } err := runWithRetry(ctx, 
func() error { - req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx) - setClientHeader(req.Header()) + req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) + a.configureCall(req, ctx) _, err := req.Do() return err }) @@ -177,8 +163,8 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { err := runWithRetry(ctx, func() error { - req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx) - setClientHeader(req.Header()) + req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(req, ctx) return req.Do() }) if err != nil { @@ -191,8 +177,8 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { var acls *raw.ObjectAccessControls var err error err = runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx) - setClientHeader(req.Header()) + req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) + a.configureCall(req, ctx) acls, err = req.Do() return err }) @@ -202,28 +188,42 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { return toACLRules(acls.Items), nil } -func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error { +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + acl := &raw.ObjectAccessControl{ Bucket: a.bucket, Entity: string(entity), Role: string(role), } + var req setRequest + if isBucketDefault { + req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) + } else { + req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + } + a.configureCall(req, ctx) err := runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx) - setClientHeader(req.Header()) _, err := req.Do() return err }) if err != nil { - return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) + if isBucketDefault { + return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } else { + return fmt.Errorf("storage: error updating object ACL entry for bucket %q, object %q, entity %q: %v", a.bucket, a.object, entity, err) + } } return nil } func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { err := runWithRetry(ctx, func() error { - req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx) - setClientHeader(req.Header()) + req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) + a.configureCall(req, ctx) return req.Do() }) if err != nil { @@ -232,6 +232,17 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { return nil } +func (a *ACLHandle) configureCall(call interface { + Header() http.Header +}, ctx context.Context) { + vc := reflect.ValueOf(call) + vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) + if a.userProject != "" { + vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) + } + setClientHeader(call.Header()) +} + func toACLRules(items []*raw.ObjectAccessControl) 
[]ACLRule { r := make([]ACLRule, 0, len(items)) for _, item := range items { diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 6918dc8636..68f298e843 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -35,6 +35,7 @@ type BucketHandle struct { acl ACLHandle defaultObjectACL ACLHandle conds *BucketConditions + userProject string // project for requester-pays buckets } // Bucket returns a BucketHandle, which provides operations on the named bucket. @@ -90,6 +91,9 @@ func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { return nil, err } + if b.userProject != "" { + req.UserProject(b.userProject) + } return req, nil } @@ -119,11 +123,13 @@ func (b *BucketHandle) Object(name string) *ObjectHandle { bucket: b.name, object: name, acl: ACLHandle{ - c: b.c, - bucket: b.name, - object: name, + c: b.c, + bucket: b.name, + object: name, + userProject: b.userProject, }, - gen: -1, + gen: -1, + userProject: b.userProject, } } @@ -153,6 +159,9 @@ func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { return nil, err } + if b.userProject != "" { + req.UserProject(b.userProject) + } return req, nil } @@ -176,6 +185,9 @@ func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPa if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { return nil, err } + if b.userProject != "" { + req.UserProject(b.userProject) + } return req, nil } @@ -215,6 +227,9 @@ type BucketAttrs struct { // Labels are the bucket's labels. Labels map[string]string + + // RequesterPays reports whether the bucket is a Requester Pays bucket. + RequesterPays bool } func newBucket(b *raw.Bucket) *BucketAttrs { @@ -229,6 +244,7 @@ func newBucket(b *raw.Bucket) *BucketAttrs { Created: convertTime(b.TimeCreated), VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, Labels: b.Labels, + RequesterPays: b.Billing != nil && b.Billing.RequesterPays, } acl := make([]ACLRule, len(b.Acl)) for i, rule := range b.Acl { @@ -277,6 +293,10 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { if b.VersioningEnabled { v = &raw.BucketVersioning{Enabled: true} } + var bb *raw.BucketBilling + if b.RequesterPays { + bb = &raw.BucketBilling{RequesterPays: true} + } return &raw.Bucket{ Name: b.Name, DefaultObjectAcl: dACL, @@ -285,6 +305,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { Acl: acl, Versioning: v, Labels: labels, + Billing: bb, } } @@ -292,6 +313,9 @@ type BucketAttrsToUpdate struct { // VersioningEnabled, if set, updates whether the bucket uses versioning. VersioningEnabled optional.Bool + // RequesterPays, if set, updates whether the bucket is a Requester Pays bucket. 
+ RequesterPays optional.Bool + setLabels map[string]string deleteLabels map[string]bool } @@ -322,6 +346,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { ForceSendFields: []string{"Enabled"}, } } + if ua.RequesterPays != nil { + rb.Billing = &raw.BucketBilling{ + RequesterPays: optional.ToBool(ua.RequesterPays), + ForceSendFields: []string{"RequesterPays"}, + } + } if ua.setLabels != nil || ua.deleteLabels != nil { rb.Labels = map[string]string{} for k, v := range ua.setLabels { @@ -373,6 +403,17 @@ func (c *BucketConditions) validate(method string) error { return nil } +// UserProject returns a new BucketHandle that passes the project ID as the user +// project for all subsequent calls. A user project is required for all operations +// on requester-pays buckets. +func (b *BucketHandle) UserProject(projectID string) *BucketHandle { + b2 := *b + b2.userProject = projectID + b2.acl.userProject = projectID + b2.defaultObjectACL.userProject = projectID + return &b2 +} + // applyBucketConds modifies the provided call using the conditions in conds. // call is something that quacks like a *raw.WhateverCall. func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { @@ -450,6 +491,9 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) req.Prefix(it.query.Prefix) req.Versions(it.query.Versions) req.PageToken(pageToken) + if it.bucket.userProject != "" { + req.UserProject(it.bucket.userProject) + } if pageSize > 0 { req.MaxResults(int64(pageSize)) } diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 59bad6776f..d0a999c1b2 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -25,6 +25,9 @@ import ( // CopierFrom creates a Copier that can copy src to dst. // You can immediately call Run on the returned Copier, or // you can configure it first. +// +// For Requester Pays buckets, the user project of dst is billed, unless it is empty, +// in which case the user project of src is billed. func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { return &Copier{dst: dst, src: src} } @@ -42,7 +45,7 @@ type Copier struct { RewriteToken string // ProgressFunc can be used to monitor the progress of a multi-RPC copy - // operation. If ProgressFunc is not nil and CopyFrom requires multiple + // operation. If ProgressFunc is not nil and copying requires multiple // calls to the underlying service (see // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then // ProgressFunc will be invoked after each call with the number of bytes of @@ -73,7 +76,7 @@ func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { // does not cause any problems. 
rawObject := c.ObjectAttrs.toRawObject("") for { - res, err := c.callRewrite(ctx, c.src, rawObject) + res, err := c.callRewrite(ctx, rawObject) if err != nil { return nil, err } @@ -86,8 +89,8 @@ func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { } } -func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw.Object) (*raw.RewriteResponse, error) { - call := c.dst.c.raw.Objects.Rewrite(src.bucket, src.object, c.dst.bucket, c.dst.object, rawObj) +func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) call.Context(ctx).Projection("full") if c.RewriteToken != "" { @@ -96,6 +99,11 @@ func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { return nil, err } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } else if c.src.userProject != "" { + call.UserProject(c.src.userProject) + } if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { return nil, err } @@ -128,6 +136,8 @@ func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { } // A Composer composes source objects into a destination object. +// +// For Requester Pays buckets, the user project of dst is billed. type Composer struct { // ObjectAttrs are optional attributes to set on the destination object. // Any attributes must be initialized before any calls on the Composer. Nil @@ -174,6 +184,9 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { return nil, err } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index f5012fb106..374027c6b7 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -262,6 +262,7 @@ type ObjectHandle struct { gen int64 // a negative value indicates latest conds *Conditions encryptionKey []byte // AES-256 key + userProject string // for requester-pays buckets } // ACL provides access to the object's access control list. 
@@ -314,6 +315,9 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { return nil, err } + if o.userProject != "" { + call.UserProject(o.userProject) + } if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { return nil, err } @@ -356,7 +360,7 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( } if uattrs.ContentEncoding != nil { attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - forceSendFields = append(forceSendFields, "ContentType") + forceSendFields = append(forceSendFields, "ContentEncoding") } if uattrs.ContentDisposition != nil { attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) @@ -388,6 +392,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( if err := applyConds("Update", o.gen, o.conds, call); err != nil { return nil, err } + if o.userProject != "" { + call.UserProject(o.userProject) + } if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { return nil, err } @@ -434,6 +441,10 @@ func (o *ObjectHandle) Delete(ctx context.Context) error { if err := applyConds("Delete", o.gen, o.conds, call); err != nil { return err } + if o.userProject != "" { + call.UserProject(o.userProject) + } + // Encryption doesn't apply to Delete. setClientHeader(call.Header()) err := runWithRetry(ctx, func() error { return call.Do() }) switch e := err.(type) { @@ -491,6 +502,9 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) } else if length > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) } + if o.userProject != "" { + req.Header.Set("X-Goog-User-Project", o.userProject) + } if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 809dfb1bdb..19f4f0e105 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -119,6 +119,9 @@ func (w *Writer) open() error { var resp *raw.Object err := applyConds("NewWriter", w.o.gen, w.o.conds, call) if err == nil { + if w.o.userProject != "" { + call.UserProject(w.o.userProject) + } setClientHeader(call.Header()) // We will only retry here if the initial POST, which obtains a URI for // the resumable upload, fails with a retryable error. 
The upload itself diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index dd9eb386cb..e3a19fac3f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -174,6 +174,9 @@ type BlobRange struct { } func (br BlobRange) String() string { + if br.End == 0 { + return fmt.Sprintf("bytes=%d-", br.Start) + } return fmt.Sprintf("bytes=%d-%d", br.Start, br.End) } @@ -192,7 +195,7 @@ func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return nil, err } - if err := b.writePropoerties(resp.headers); err != nil { + if err := b.writeProperties(resp.headers, true); err != nil { return resp.body, err } return resp.body, nil @@ -212,7 +215,9 @@ func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) { if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { return nil, err } - if err := b.writePropoerties(resp.headers); err != nil { + // Content-Length header should not be updated, as the service returns the range length + // (which is not alwys the full blob length) + if err := b.writeProperties(resp.headers, false); err != nil { return resp.body, err } return resp.body, nil @@ -225,7 +230,9 @@ func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) if options != nil { if options.Range != nil { headers["Range"] = options.Range.String() - headers["x-ms-range-get-content-md5"] = fmt.Sprintf("%v", options.GetRangeContentMD5) + if options.GetRangeContentMD5 { + headers["x-ms-range-get-content-md5"] = "true" + } } if options.GetBlobOptions != nil { headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions)) @@ -322,18 +329,20 @@ func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return err } - return b.writePropoerties(resp.headers) + return b.writeProperties(resp.headers, true) } -func (b *Blob) writePropoerties(h http.Header) error { +func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { var err error - var contentLength int64 - contentLengthStr := h.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return err + contentLength := b.Properties.ContentLength + if includeContentLen { + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return err + } } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go index 6a180d48d7..cb773abee0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -91,12 +91,21 @@ func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions var n int64 var err error if blob != nil { - buf := &bytes.Buffer{} - n, err = io.Copy(buf, blob) - if err != nil { - return err + type lener interface { + Len() int } - blob = buf + // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc. 
+ if l, ok := blob.(lener); ok { + n = int64(l.Len()) + } else { + var buf bytes.Buffer + n, err = io.Copy(&buf, blob) + if err != nil { + return err + } + blob = &buf + } + headers["Content-Length"] = strconv.FormatInt(n, 10) } b.Properties.ContentLength = n diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go index 169f68dbef..3286a9cb5e 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -5,7 +5,7 @@ import ( "io/ioutil" "os" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var logger *logrus.Logger diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go index 4d858ed611..48998bb051 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -9,7 +9,7 @@ import ( "strconv" "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var logger *logrus.Logger diff --git a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go index d55f85d4b6..0ea7189161 100644 --- a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go +++ b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go @@ -59,14 +59,13 @@ const MaxUDPPayloadSize = 65467 Stat suffixes */ var ( - gaugeSuffix = []byte("|g") - countSuffix = []byte("|c") - histogramSuffix = []byte("|h") - distributionSuffix = []byte("|d") - decrSuffix = []byte("-1|c") - incrSuffix = []byte("1|c") - setSuffix = []byte("|s") - timingSuffix = []byte("|ms") + gaugeSuffix = []byte("|g") + countSuffix = []byte("|c") + histogramSuffix = []byte("|h") + decrSuffix = []byte("-1|c") + incrSuffix = []byte("1|c") + setSuffix = []byte("|s") + timingSuffix = []byte("|ms") ) // A Client is a handle for sending udp messages to dogstatsd. It is safe to @@ -290,11 +289,6 @@ func (c *Client) Histogram(name string, value float64, tags []string, rate float return c.send(name, value, histogramSuffix, tags, rate) } -// Distribution tracks accurate global percentiles of a set of values. 
-func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error { - return c.send(name, value, distributionSuffix, tags, rate) -} - // Decr is just Count of -1 func (c *Client) Decr(name string, tags []string, rate float64) error { return c.send(name, nil, decrSuffix, tags, rate) diff --git a/vendor/github.com/SAP/go-hdb/NOTICE b/vendor/github.com/SAP/go-hdb/NOTICE old mode 100644 new mode 100755 diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md index 63d415e12d..c443aed097 100644 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,12 @@ +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + # 1.0.0 * Officially changed name to lower-case diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index cbe8b69625..82aeb4eef3 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -1,22 +1,24 @@ # Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** +the standard library logger. [Godoc][godoc]. -**Seeing weird case-sensitive problems?** Unfortunately, the author failed to -realize the consequences of renaming to lower-case. Due to the Go package -environment, this caused issues. Regretfully, there's no turning back now. +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. Everything using `logrus` will need to use the lower-case: `github.com/sirupsen/logrus`. Any package that isn't, should be changed. -I am terribly sorry for this inconvenience. Logrus strives hard for backwards -compatibility, and the author failed to realize the cascading consequences of -such a name-change. To fix Glide, see [these +To fix Glide, see [these comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +**Are you interested in assisting in maintaining Logrus?** Currently I have a +lot of obligations, and I am unable to provide Logrus with the maintainership it +needs. If you'd like to help, please reach out to me at `simon at author's +username dot com`. Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -266,6 +268,7 @@ Note: Syslog hook also support connecting to local syslog (Ex. 
"/dev/log" or "/v | [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | | [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | | [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | | [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | | [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | | [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | @@ -280,7 +283,7 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v | [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | | [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | | [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | | [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. | | [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | | [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | @@ -449,13 +452,13 @@ Logrus has a built in facility for asserting the presence of log messages. This ```go import( "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/null" + "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "testing" ) func TestSomething(t*testing.T){ - logger, hook := null.NewNullLogger() + logger, hook := test.NewNullLogger() logger.Error("Helloerror") assert.Equal(t, 1, len(hook.Entries)) diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go index 320e5d5b8b..5bf582ef27 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -35,6 +35,7 @@ type Entry struct { Time time.Time // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level // Message passed to Debug, Info, Warn, Error, Fatal or Panic diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go index 1aeaa90ba2..013183edab 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -31,7 +31,7 @@ func SetFormatter(formatter Formatter) { func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() - std.setLevel(level) + std.SetLevel(level) } // GetLevel returns the standard logger level. 
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index 370fff5d1b..b44966f973 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -312,6 +312,6 @@ func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } -func (logger *Logger) setLevel(level Level) { +func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index ba88854061..cf3f17f2f8 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -14,7 +14,7 @@ const ( red = 31 green = 32 yellow = 33 - blue = 34 + blue = 36 gray = 37 ) @@ -52,10 +52,6 @@ type TextFormatter struct { // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool - // QuoteCharacter can be set to the override the default quoting character " - // with something else. For example: ', or `. - QuoteCharacter string - // Whether the logger's out is to a terminal isTerminal bool @@ -63,9 +59,6 @@ type TextFormatter struct { } func (f *TextFormatter) init(entry *Entry) { - if len(f.QuoteCharacter) == 0 { - f.QuoteCharacter = "\"" - } if entry.Logger != nil { f.isTerminal = IsTerminal(entry.Logger.Out) } @@ -153,7 +146,7 @@ func (f *TextFormatter) needsQuoting(text string) bool { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { + ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { return true } } @@ -169,21 +162,14 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf } func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - switch value := value.(type) { - case string: - if !f.needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter) - } - case error: - errmsg := value.Error() - if !f.needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter) - } - default: - fmt.Fprint(b, value) + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 1313478f27..e25a460fba 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -15,11 +15,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index d1f31f1c65..ae3a286960 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -95,7 +95,7 @@ type Config struct { // recoverable failures. 
// // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. // // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 63bdaad76f..ba0c07b25b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -261,6 +261,7 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -313,9 +314,12 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "autoscaling": service{ @@ -342,9 +346,14 @@ var awsPartition = partition{ "batch": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "budgets": service{ @@ -460,10 +469,13 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -472,8 +484,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -509,11 +523,14 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -975,8 +992,10 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "health": service{ @@ -1099,6 +1118,7 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -1465,9 +1485,16 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": 
endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ @@ -1867,6 +1894,18 @@ var awscnPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -1980,6 +2019,12 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "storagegateway": service{ Endpoints: endpoints{ @@ -2202,6 +2247,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3", "s3v4"}, @@ -2219,6 +2270,12 @@ var awsusgovPartition = partition{ }, }, }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ @@ -2242,6 +2299,12 @@ var awsusgovPartition = partition{ }, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index db87188e20..3babb5abdb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType { // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. +// LogLevel is nil, will default to LogOff comparison. func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default // to LogOff comparison. func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 8d369c1b8c..2c05dbdc2e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 6f7a4b1df1..b7da95ab6e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -45,7 +45,7 @@ // If signing a request intended for HTTP2 server, and you're using Go 1.6.2 // through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the // request URL. 
https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP // message. URL.Opaque generally will force Go to make requests with absolute URL. // URL.RawPath does not do this, but RawPath must be a valid escaping of Path // or url.EscapedPath will ignore the RawPath escaping. @@ -604,7 +604,11 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { - headerValues[i] = "host:" + ctx.Request.URL.Host + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } } else { headerValues[i] = k + ":" + strings.Join(ctx.SignedHeaderVals[k], ",") @@ -724,24 +728,24 @@ func stripExcessSpaces(headerVals []string) []string { trimmed := strings.TrimSpace(str) idx := strings.Index(trimmed, doubleSpaces) - var buf []byte - for idx > -1 { - // Multiple adjacent spaces found - if buf == nil { - // first time create the buffer - buf = []byte(trimmed) - } + if idx < 0 { + vals[i] = trimmed + continue + } + buf := []byte(trimmed) + for idx > -1 { stripToIdx := -1 for j := idx + 1; j < len(buf); j++ { if buf[j] != ' ' { buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j + stripToIdx = j - idx - 1 break } } if stripToIdx >= 0 { + // Find next double space idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) if idx >= 0 { idx += stripToIdx @@ -751,11 +755,7 @@ func stripExcessSpaces(headerVals []string) []string { } } - if buf != nil { - vals[i] = string(buf) - } else { - vals[i] = trimmed - } + vals[i] = string(buf) } return vals } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index eada54bb82..5d4154ac10 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.8.43" +const SDKVersion = "1.10.12" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index afbe5bb584..bd13ab77ee 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -3610,6 +3610,85 @@ func (c *EC2) CreateNetworkInterfaceWithContext(ctx aws.Context, input *CreateNe return out, req.Send() } +const opCreateNetworkInterfacePermission = "CreateNetworkInterfacePermission" + +// CreateNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkInterfacePermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateNetworkInterfacePermission for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNetworkInterfacePermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CreateNetworkInterfacePermissionRequest method. +// req, resp := client.CreateNetworkInterfacePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermission +func (c *EC2) CreateNetworkInterfacePermissionRequest(input *CreateNetworkInterfacePermissionInput) (req *request.Request, output *CreateNetworkInterfacePermissionOutput) { + op := &request.Operation{ + Name: opCreateNetworkInterfacePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkInterfacePermissionInput{} + } + + output = &CreateNetworkInterfacePermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud. +// +// Grants an AWS authorized partner account permission to attach the specified +// network interface to an instance in their account. +// +// You can grant permission to a single AWS account only, and only one account +// at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateNetworkInterfacePermission for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermission +func (c *EC2) CreateNetworkInterfacePermission(input *CreateNetworkInterfacePermissionInput) (*CreateNetworkInterfacePermissionOutput, error) { + req, out := c.CreateNetworkInterfacePermissionRequest(input) + return out, req.Send() +} + +// CreateNetworkInterfacePermissionWithContext is the same as CreateNetworkInterfacePermission with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkInterfacePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkInterfacePermissionWithContext(ctx aws.Context, input *CreateNetworkInterfacePermissionInput, opts ...request.Option) (*CreateNetworkInterfacePermissionOutput, error) { + req, out := c.CreateNetworkInterfacePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreatePlacementGroup = "CreatePlacementGroup" // CreatePlacementGroupRequest generates a "aws/request.Request" representing the @@ -5816,6 +5895,84 @@ func (c *EC2) DeleteNetworkInterfaceWithContext(ctx aws.Context, input *DeleteNe return out, req.Send() } +const opDeleteNetworkInterfacePermission = "DeleteNetworkInterfacePermission" + +// DeleteNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkInterfacePermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteNetworkInterfacePermission for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNetworkInterfacePermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNetworkInterfacePermissionRequest method. +// req, resp := client.DeleteNetworkInterfacePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermission +func (c *EC2) DeleteNetworkInterfacePermissionRequest(input *DeleteNetworkInterfacePermissionInput) (req *request.Request, output *DeleteNetworkInterfacePermissionOutput) { + op := &request.Operation{ + Name: opDeleteNetworkInterfacePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkInterfacePermissionInput{} + } + + output = &DeleteNetworkInterfacePermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud. +// +// Deletes a permission for a network interface. By default, you cannot delete +// the permission if the account for which you're removing the permission has +// attached the network interface to an instance. However, you can force delete +// the permission, regardless of any attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteNetworkInterfacePermission for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermission +func (c *EC2) DeleteNetworkInterfacePermission(input *DeleteNetworkInterfacePermissionInput) (*DeleteNetworkInterfacePermissionOutput, error) { + req, out := c.DeleteNetworkInterfacePermissionRequest(input) + return out, req.Send() +} + +// DeleteNetworkInterfacePermissionWithContext is the same as DeleteNetworkInterfacePermission with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkInterfacePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkInterfacePermissionWithContext(ctx aws.Context, input *DeleteNetworkInterfacePermissionInput, opts ...request.Option) (*DeleteNetworkInterfacePermissionOutput, error) { + req, out := c.DeleteNetworkInterfacePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeletePlacementGroup = "DeletePlacementGroup" // DeletePlacementGroupRequest generates a "aws/request.Request" representing the @@ -8982,7 +9139,8 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) // DescribeInstanceStatus API operation for Amazon Elastic Compute Cloud. // // Describes the status of one or more instances. By default, only running instances -// are described, unless specified otherwise. +// are described, unless you specifically indicate to return the status of all +// instances. // // Instance status includes the following components: // @@ -9742,6 +9900,81 @@ func (c *EC2) DescribeNetworkInterfaceAttributeWithContext(ctx aws.Context, inpu return out, req.Send() } +const opDescribeNetworkInterfacePermissions = "DescribeNetworkInterfacePermissions" + +// DescribeNetworkInterfacePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfacePermissions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DescribeNetworkInterfacePermissions for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNetworkInterfacePermissions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNetworkInterfacePermissionsRequest method. +// req, resp := client.DescribeNetworkInterfacePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissions +func (c *EC2) DescribeNetworkInterfacePermissionsRequest(input *DescribeNetworkInterfacePermissionsInput) (req *request.Request, output *DescribeNetworkInterfacePermissionsOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfacePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfacePermissionsInput{} + } + + output = &DescribeNetworkInterfacePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNetworkInterfacePermissions API operation for Amazon Elastic Compute Cloud. +// +// Describes the permissions for your network interfaces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeNetworkInterfacePermissions for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissions +func (c *EC2) DescribeNetworkInterfacePermissions(input *DescribeNetworkInterfacePermissionsInput) (*DescribeNetworkInterfacePermissionsOutput, error) { + req, out := c.DescribeNetworkInterfacePermissionsRequest(input) + return out, req.Send() +} + +// DescribeNetworkInterfacePermissionsWithContext is the same as DescribeNetworkInterfacePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkInterfacePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfacePermissionsWithContext(ctx aws.Context, input *DescribeNetworkInterfacePermissionsInput, opts ...request.Option) (*DescribeNetworkInterfacePermissionsOutput, error) { + req, out := c.DescribeNetworkInterfacePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" // DescribeNetworkInterfacesRequest generates a "aws/request.Request" representing the @@ -24861,6 +25094,113 @@ func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface) return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionRequest +type CreateNetworkInterfacePermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + AwsAccountId *string `type:"string"` + + // The AWS service. Currently not supported. + AwsService *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `type:"string" required:"true"` + + // The type of permission to grant. + // + // Permission is a required field + Permission *string `type:"string" required:"true" enum:"InterfacePermissionType"` +} + +// String returns the string representation +func (s CreateNetworkInterfacePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfacePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNetworkInterfacePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkInterfacePermissionInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + if s.Permission == nil { + invalidParams.Add(request.NewErrParamRequired("Permission")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. 
+func (s *CreateNetworkInterfacePermissionInput) SetAwsAccountId(v string) *CreateNetworkInterfacePermissionInput { + s.AwsAccountId = &v + return s +} + +// SetAwsService sets the AwsService field's value. +func (s *CreateNetworkInterfacePermissionInput) SetAwsService(v string) *CreateNetworkInterfacePermissionInput { + s.AwsService = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateNetworkInterfacePermissionInput) SetDryRun(v bool) *CreateNetworkInterfacePermissionInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateNetworkInterfacePermissionInput) SetNetworkInterfaceId(v string) *CreateNetworkInterfacePermissionInput { + s.NetworkInterfaceId = &v + return s +} + +// SetPermission sets the Permission field's value. +func (s *CreateNetworkInterfacePermissionInput) SetPermission(v string) *CreateNetworkInterfacePermissionInput { + s.Permission = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionResult +type CreateNetworkInterfacePermissionOutput struct { + _ struct{} `type:"structure"` + + // Information about the permission for the network interface. + InterfacePermission *NetworkInterfacePermission `locationName:"interfacePermission" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfacePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfacePermissionOutput) GoString() string { + return s.String() +} + +// SetInterfacePermission sets the InterfacePermission field's value. +func (s *CreateNetworkInterfacePermissionOutput) SetInterfacePermission(v *NetworkInterfacePermission) *CreateNetworkInterfacePermissionOutput { + s.InterfacePermission = v + return s +} + // Contains the parameters for CreatePlacementGroup. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreatePlacementGroupRequest type CreatePlacementGroupInput struct { @@ -27325,6 +27665,91 @@ func (s DeleteNetworkInterfaceOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionRequest +type DeleteNetworkInterfacePermissionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Specify true to remove the permission even if the network interface is attached + // to an instance. + Force *bool `type:"boolean"` + + // The ID of the network interface permission. + // + // NetworkInterfacePermissionId is a required field + NetworkInterfacePermissionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkInterfacePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfacePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteNetworkInterfacePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkInterfacePermissionInput"} + if s.NetworkInterfacePermissionId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfacePermissionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetDryRun(v bool) *DeleteNetworkInterfacePermissionInput { + s.DryRun = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetForce(v bool) *DeleteNetworkInterfacePermissionInput { + s.Force = &v + return s +} + +// SetNetworkInterfacePermissionId sets the NetworkInterfacePermissionId field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetNetworkInterfacePermissionId(v string) *DeleteNetworkInterfacePermissionInput { + s.NetworkInterfacePermissionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionResult +type DeleteNetworkInterfacePermissionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds, otherwise returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteNetworkInterfacePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfacePermissionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteNetworkInterfacePermissionOutput) SetReturn(v bool) *DeleteNetworkInterfacePermissionOutput { + s.Return = &v + return s +} + // Contains the parameters for DeletePlacementGroup. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeletePlacementGroupRequest type DeletePlacementGroupInput struct { @@ -31986,6 +32411,105 @@ func (s *DescribeNetworkInterfaceAttributeOutput) SetSourceDestCheck(v *Attribut return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsRequest +type DescribeNetworkInterfacePermissionsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // * network-interface-permission.network-interface-permission-id - The ID + // of the permission. + // + // * network-interface-permission.network-interface-id - The ID of the network + // interface. + // + // * network-interface-permission.aws-account-id - The AWS account ID. + // + // * network-interface-permission.aws-service - The AWS service. + // + // * network-interface-permission.permission - The type of permission (INSTANCE-ATTACH + // | EIP-ASSOCIATE). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. If + // this parameter is not specified, up to 50 results are returned by default. + MaxResults *int64 `type:"integer"` + + // One or more network interface permission IDs. + NetworkInterfacePermissionIds []*string `locationName:"NetworkInterfacePermissionId" type:"list"` + + // The token to request the next page of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacePermissionsInput) GoString() string { + return s.String() +} + +// SetFilters sets the Filters field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetFilters(v []*Filter) *DescribeNetworkInterfacePermissionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetMaxResults(v int64) *DescribeNetworkInterfacePermissionsInput { + s.MaxResults = &v + return s +} + +// SetNetworkInterfacePermissionIds sets the NetworkInterfacePermissionIds field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetNetworkInterfacePermissionIds(v []*string) *DescribeNetworkInterfacePermissionsInput { + s.NetworkInterfacePermissionIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetNextToken(v string) *DescribeNetworkInterfacePermissionsInput { + s.NextToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsResult +type DescribeNetworkInterfacePermissionsOutput struct { + _ struct{} `type:"structure"` + + // The network interface permissions. + NetworkInterfacePermissions []*NetworkInterfacePermission `locationName:"networkInterfacePermissions" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacePermissionsOutput) GoString() string { + return s.String() +} + +// SetNetworkInterfacePermissions sets the NetworkInterfacePermissions field's value. +func (s *DescribeNetworkInterfacePermissionsOutput) SetNetworkInterfacePermissions(v []*NetworkInterfacePermission) *DescribeNetworkInterfacePermissionsOutput { + s.NetworkInterfacePermissions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkInterfacePermissionsOutput) SetNextToken(v string) *DescribeNetworkInterfacePermissionsOutput { + s.NextToken = &v + return s +} + // Contains the parameters for DescribeNetworkInterfaces. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacesRequest type DescribeNetworkInterfacesInput struct { @@ -33764,7 +34288,7 @@ type DescribeSnapshotsInput struct { // // * owner-alias - Value from an Amazon-maintained list (amazon | aws-marketplace // | microsoft) of snapshot owners. Not to be confused with the user-configured - // AWS account alias, which is set from the IAM consolew. + // AWS account alias, which is set from the IAM console. // // * owner-id - The ID of the AWS account that owns the snapshot. // @@ -43905,7 +44429,7 @@ type ModifyInstanceAttributeInput struct { BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // If the value is true, you can't terminate the instance using the Amazon EC2 - // console, CLI, or API; otherwise, you can. 
You cannot use this paramater for + // console, CLI, or API; otherwise, you can. You cannot use this parameter for // Spot Instances. DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` @@ -46208,6 +46732,110 @@ func (s *NetworkInterfaceIpv6Address) SetIpv6Address(v string) *NetworkInterface return s } +// Describes a permission for a network interface. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePermission +type NetworkInterfacePermission struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + AwsAccountId *string `locationName:"awsAccountId" type:"string"` + + // The AWS service. + AwsService *string `locationName:"awsService" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the network interface permission. + NetworkInterfacePermissionId *string `locationName:"networkInterfacePermissionId" type:"string"` + + // The type of permission. + Permission *string `locationName:"permission" type:"string" enum:"InterfacePermissionType"` + + // Information about the state of the permission. + PermissionState *NetworkInterfacePermissionState `locationName:"permissionState" type:"structure"` +} + +// String returns the string representation +func (s NetworkInterfacePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePermission) GoString() string { + return s.String() +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *NetworkInterfacePermission) SetAwsAccountId(v string) *NetworkInterfacePermission { + s.AwsAccountId = &v + return s +} + +// SetAwsService sets the AwsService field's value. +func (s *NetworkInterfacePermission) SetAwsService(v string) *NetworkInterfacePermission { + s.AwsService = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *NetworkInterfacePermission) SetNetworkInterfaceId(v string) *NetworkInterfacePermission { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkInterfacePermissionId sets the NetworkInterfacePermissionId field's value. +func (s *NetworkInterfacePermission) SetNetworkInterfacePermissionId(v string) *NetworkInterfacePermission { + s.NetworkInterfacePermissionId = &v + return s +} + +// SetPermission sets the Permission field's value. +func (s *NetworkInterfacePermission) SetPermission(v string) *NetworkInterfacePermission { + s.Permission = &v + return s +} + +// SetPermissionState sets the PermissionState field's value. +func (s *NetworkInterfacePermission) SetPermissionState(v *NetworkInterfacePermissionState) *NetworkInterfacePermission { + s.PermissionState = v + return s +} + +// Describes the state of a network interface permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePermissionState +type NetworkInterfacePermissionState struct { + _ struct{} `type:"structure"` + + // The state of the permission. + State *string `locationName:"state" type:"string" enum:"NetworkInterfacePermissionStateCode"` + + // A status message, if applicable. 
+ StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfacePermissionState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePermissionState) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *NetworkInterfacePermissionState) SetState(v string) *NetworkInterfacePermissionState { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *NetworkInterfacePermissionState) SetStatusMessage(v string) *NetworkInterfacePermissionState { + s.StatusMessage = &v + return s +} + // Describes the private IPv4 address of a network interface. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePrivateIpAddress type NetworkInterfacePrivateIpAddress struct { @@ -57876,6 +58504,15 @@ const ( // InstanceTypeG28xlarge is a InstanceType enum value InstanceTypeG28xlarge = "g2.8xlarge" + // InstanceTypeG34xlarge is a InstanceType enum value + InstanceTypeG34xlarge = "g3.4xlarge" + + // InstanceTypeG38xlarge is a InstanceType enum value + InstanceTypeG38xlarge = "g3.8xlarge" + + // InstanceTypeG316xlarge is a InstanceType enum value + InstanceTypeG316xlarge = "g3.16xlarge" + // InstanceTypeCg14xlarge is a InstanceType enum value InstanceTypeCg14xlarge = "cg1.4xlarge" @@ -57907,6 +58544,14 @@ const ( InstanceTypeF116xlarge = "f1.16xlarge" ) +const ( + // InterfacePermissionTypeInstanceAttach is a InterfacePermissionType enum value + InterfacePermissionTypeInstanceAttach = "INSTANCE-ATTACH" + + // InterfacePermissionTypeEipAssociate is a InterfacePermissionType enum value + InterfacePermissionTypeEipAssociate = "EIP-ASSOCIATE" +) + const ( // ListingStateAvailable is a ListingState enum value ListingStateAvailable = "available" @@ -57988,6 +58633,20 @@ const ( NetworkInterfaceAttributeAttachment = "attachment" ) +const ( + // NetworkInterfacePermissionStateCodePending is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodePending = "pending" + + // NetworkInterfacePermissionStateCodeGranted is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeGranted = "granted" + + // NetworkInterfacePermissionStateCodeRevoking is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeRevoking = "revoking" + + // NetworkInterfacePermissionStateCodeRevoked is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeRevoked = "revoked" +) + const ( // NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value NetworkInterfaceStatusAvailable = "available" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 0435398348..74211d5fe2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -7899,6 +7899,9 @@ type CreateMultipartUploadInput struct { // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + // The tag-set for the object. 
The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. @@ -8060,6 +8063,12 @@ func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartU return s } +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + // SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { s.WebsiteRedirectLocation = &v diff --git a/vendor/github.com/boombuler/barcode/barcode.go b/vendor/github.com/boombuler/barcode/barcode.go index 3479c7bc20..25f4a693db 100644 --- a/vendor/github.com/boombuler/barcode/barcode.go +++ b/vendor/github.com/boombuler/barcode/barcode.go @@ -2,6 +2,21 @@ package barcode import "image" +const ( + TypeAztec = "Aztec" + TypeCodabar = "Codabar" + TypeCode128 = "Code 128" + TypeCode39 = "Code 39" + TypeCode93 = "Code 93" + TypeDataMatrix = "DataMatrix" + TypeEAN8 = "EAN 8" + TypeEAN13 = "EAN 13" + TypePDF = "PDF417" + TypeQR = "QR Code" + Type2of5 = "2 of 5" + Type2of5Interleaved = "2 of 5 (interleaved)" +) + // Contains some meta information about a barcode type Metadata struct { // the name of the barcode kind diff --git a/vendor/github.com/boombuler/barcode/qr/qrcode.go b/vendor/github.com/boombuler/barcode/qr/qrcode.go index b7ac26d748..13607604bb 100644 --- a/vendor/github.com/boombuler/barcode/qr/qrcode.go +++ b/vendor/github.com/boombuler/barcode/qr/qrcode.go @@ -20,7 +20,7 @@ func (qr *qrcode) Content() string { } func (qr *qrcode) Metadata() barcode.Metadata { - return barcode.Metadata{"QR Code", 2} + return barcode.Metadata{barcode.TypeQR, 2} } func (qr *qrcode) ColorModel() color.Model { diff --git a/vendor/github.com/cenk/backoff/exponential.go b/vendor/github.com/cenk/backoff/exponential.go index cc2a164f28..9a6addf075 100644 --- a/vendor/github.com/cenk/backoff/exponential.go +++ b/vendor/github.com/cenk/backoff/exponential.go @@ -63,6 +63,7 @@ type ExponentialBackOff struct { currentInterval time.Duration startTime time.Time + random *rand.Rand } // Clock is an interface that returns current time for BackOff. 
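The exponential.go hunk above adds a per-instance `random` source to ExponentialBackOff; the hunks that follow seed it in NewExponentialBackOff and use it, instead of the global math/rand source, to jitter NextBackOff. A short usage sketch, assuming the package's existing Retry helper (untouched by this bump); the failing operation is a stand-in:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenk/backoff"
)

func main() {
	// NewExponentialBackOff seeds the new per-instance random source, so
	// concurrent BackOff values no longer contend on math/rand's global state.
	b := backoff.NewExponentialBackOff()

	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // retried with a jittered delay
		}
		return nil
	}

	// Retry calls NextBackOff between attempts until the operation succeeds
	// or the back-off returns Stop.
	if err := backoff.Retry(op, b); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```

Zero-value literals of ExponentialBackOff keep working: NextBackOff lazily seeds the source when the field is nil, as the second hunk shows.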
@@ -88,6 +89,7 @@ func NewExponentialBackOff() *ExponentialBackOff { MaxInterval: DefaultMaxInterval, MaxElapsedTime: DefaultMaxElapsedTime, Clock: SystemClock, + random: rand.New(rand.NewSource(time.Now().UnixNano())), } b.Reset() return b @@ -116,7 +118,10 @@ func (b *ExponentialBackOff) NextBackOff() time.Duration { return Stop } defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + if b.random == nil { + b.random = rand.New(rand.NewSource(time.Now().UnixNano())) + } + return getRandomValueFromInterval(b.RandomizationFactor, b.random.Float64(), b.currentInterval) } // GetElapsedTime returns the elapsed time since an ExponentialBackOff instance diff --git a/vendor/github.com/cenk/backoff/tries.go b/vendor/github.com/cenk/backoff/tries.go new file mode 100644 index 0000000000..d2da7308b6 --- /dev/null +++ b/vendor/github.com/cenk/backoff/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxTries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxTries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index a0e059d5e3..9dbc34b1eb 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" ) @@ -215,6 +216,16 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts if c.cfg.DialTimeout > 0 { opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + } + // Only relevant when KeepAliveTime is non-zero + if c.cfg.DialKeepAliveTimeout > 0 { + params.Timeout = c.cfg.DialKeepAliveTimeout + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } opts = append(opts, dopts...) f := func(host string, t time.Duration) (net.Conn, error) { diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go index c55228cc0b..e01489c06f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -99,6 +99,18 @@ func (cmp *Cmp) ValueBytes() []byte { // WithValueBytes sets the byte slice for the comparison's value. func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } +// WithRange sets the comparison to scan the range [key, end). +func (cmp Cmp) WithRange(end string) Cmp { + cmp.RangeEnd = []byte(end) + return cmp +} + +// WithPrefix sets the comparison to scan all keys prefixed by the key. 
+func (cmp Cmp) WithPrefix() Cmp { + cmp.RangeEnd = getPrefix(cmp.Key) + return cmp +} + func mustInt64(val interface{}) int64 { if v, ok := val.(int64); ok { return v diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go index bba0586651..a9e69e8021 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -46,7 +46,7 @@ const ( // SerializableSnapshot provides serializable isolation and also checks // for write conflicts. SerializableSnapshot Isolation = iota - // Serializable reads within the same transactiona attempt return data + // Serializable reads within the same transaction attempt return data // from the at the revision of the first read. Serializable // RepeatableReads reads within the same transaction attempt always @@ -85,7 +85,7 @@ func WithPrefetch(keys ...string) stmOption { return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } } -// NewSTM initiates a new STM instance, using snapshot isolation by default. +// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { opts := &stmOptions{ctx: c.Ctx()} for _, f := range so { diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index dda72a748e..d9545e430c 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -33,6 +33,14 @@ type Config struct { // DialTimeout is the timeout for failing to establish a connection. DialTimeout time.Duration `json:"dial-timeout"` + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + // TLS holds the client secure credentials, if any. 
TLS *tls.Config diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index dc45fa4ad0..f887e04410 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -66,11 +66,13 @@ type OpResponse struct { put *PutResponse get *GetResponse del *DeleteResponse + txn *TxnResponse } func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } type kv struct { remote pb.KVClient @@ -134,7 +136,6 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { - // TODO: handle other ops case tRange: var resp *pb.RangeResponse resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) @@ -155,6 +156,12 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest()) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } default: panic("Unknown op") } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go index 2a75b7e9c5..236ab261dd 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -28,6 +28,7 @@ type ( AlarmResponse pb.AlarmResponse AlarmMember pb.AlarmMember StatusResponse pb.StatusResponse + MoveLeaderResponse pb.MoveLeaderResponse ) type Maintenance interface { @@ -51,6 +52,10 @@ type Maintenance interface { // Snapshot provides a reader for a snapshot of a backend. Snapshot(ctx context.Context) (io.ReadCloser, error) + + // MoveLeader requests current leader to transfer its leadership to the transferee. + // Request must be made to the leader. + MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) } type maintenance struct { @@ -180,3 +185,8 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { }() return pr, nil } + +func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { + resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, grpc.FailFast(false)) + return (*MoveLeaderResponse)(resp), toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go index 63b99501e8..ef6f04368a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -23,6 +23,7 @@ const ( tRange opType = iota + 1 tPut tDeleteRange + tTxn ) var ( @@ -67,10 +68,18 @@ type Op struct { // for put val []byte leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op } // accesors / mutators +func (op Op) IsTxn() bool { return op.t == tTxn } +func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } + // KeyBytes returns the byte slice holding the Op's key. 
func (op Op) KeyBytes() []byte { return op.key } @@ -113,6 +122,22 @@ func (op Op) toRangeRequest() *pb.RangeRequest { return r } +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: @@ -123,12 +148,27 @@ func (op Op) toRequestOp() *pb.RequestOp { case tDeleteRange: r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + case tTxn: + return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} default: panic("Unknown Op") } } func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } return op.t != tRange } @@ -194,6 +234,10 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + func opWatch(key string, opts ...OpOption) Op { ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 0907e902c6..719ad8157a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -59,6 +59,7 @@ var ( ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management") ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") + ErrGRPCNotLeader = grpc.Errorf(codes.Unavailable, "etcdserver: not leader") ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped") ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out") @@ -106,6 +107,7 @@ var ( grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + grpc.ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, grpc.ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, @@ -153,6 +155,7 @@ var ( ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotLeader = Error(ErrGRPCNotLeader) ErrNotCapable = Error(ErrGRPCNotCapable) ErrStopped = Error(ErrGRPCStopped) ErrTimeout = Error(ErrGRPCTimeout) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index aabf90061f..febf62cf41 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -58,6 +58,8 @@ MemberListResponse DefragmentRequest DefragmentResponse + MoveLeaderRequest + 
MoveLeaderResponse AlarmRequest AlarmMember AlarmResponse diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index 073d4a3946..1cef5dc9e2 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -204,7 +204,7 @@ func (x AlarmRequest_AlarmAction) String() string { return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) } func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{41, 0} + return fileDescriptorRpc, []int{43, 0} } type ResponseHeader struct { @@ -608,6 +608,7 @@ type RequestOp struct { // *RequestOp_RequestRange // *RequestOp_RequestPut // *RequestOp_RequestDeleteRange + // *RequestOp_RequestTxn Request isRequestOp_Request `protobuf_oneof:"request"` } @@ -631,10 +632,14 @@ type RequestOp_RequestPut struct { type RequestOp_RequestDeleteRange struct { RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` } +type RequestOp_RequestTxn struct { + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` +} func (*RequestOp_RequestRange) isRequestOp_Request() {} func (*RequestOp_RequestPut) isRequestOp_Request() {} func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} +func (*RequestOp_RequestTxn) isRequestOp_Request() {} func (m *RequestOp) GetRequest() isRequestOp_Request { if m != nil { @@ -664,12 +669,20 @@ func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { return nil } +func (m *RequestOp) GetRequestTxn() *TxnRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { + return x.RequestTxn + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
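At the wire level, the RequestOp_RequestTxn case introduced above lets a TxnRequest carry another transaction as one of its success or failure operations. A hand-built illustration, assuming pb aliases the etcdserverpb package:

// Illustration only: an outer transaction whose success branch is itself a txn.
inner := &pb.TxnRequest{
	Success: []*pb.RequestOp{
		{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("k"), Value: []byte("v2")}}},
	},
}
outer := &pb.TxnRequest{
	Success: []*pb.RequestOp{
		{Request: &pb.RequestOp_RequestTxn{RequestTxn: inner}}, // field 4 in the oneof
	},
}
_ = outer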
func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ (*RequestOp_RequestRange)(nil), (*RequestOp_RequestPut)(nil), (*RequestOp_RequestDeleteRange)(nil), + (*RequestOp_RequestTxn)(nil), } } @@ -692,6 +705,11 @@ func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { return err } + case *RequestOp_RequestTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestTxn); err != nil { + return err + } case nil: default: return fmt.Errorf("RequestOp.Request has unexpected type %T", x) @@ -726,6 +744,14 @@ func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buff err := b.DecodeMessage(msg) m.Request = &RequestOp_RequestDeleteRange{msg} return true, err + case 4: // request.request_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestTxn{msg} + return true, err default: return false, nil } @@ -750,6 +776,11 @@ func _RequestOp_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *RequestOp_RequestTxn: + s := proto.Size(x.RequestTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -764,6 +795,7 @@ type ResponseOp struct { // *ResponseOp_ResponseRange // *ResponseOp_ResponsePut // *ResponseOp_ResponseDeleteRange + // *ResponseOp_ResponseTxn Response isResponseOp_Response `protobuf_oneof:"response"` } @@ -787,10 +819,14 @@ type ResponseOp_ResponsePut struct { type ResponseOp_ResponseDeleteRange struct { ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` } +type ResponseOp_ResponseTxn struct { + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` +} func (*ResponseOp_ResponseRange) isResponseOp_Response() {} func (*ResponseOp_ResponsePut) isResponseOp_Response() {} func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} +func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} func (m *ResponseOp) GetResponse() isResponseOp_Response { if m != nil { @@ -820,12 +856,20 @@ func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { return nil } +func (m *ResponseOp) GetResponseTxn() *TxnResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { + return x.ResponseTxn + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
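The literal 4<<3 | proto.WireBytes written by the oneof marshaler above is the standard protobuf key byte, tag = field_number<<3 | wire_type; the same rule produces the 0x22 and 0x42 constants in the hand-rolled marshalers later in this file. A quick check:

// proto.WireBytes == 2 (length-delimited).
fmt.Printf("%#x %#x\n", 4<<3|2, 8<<3|2) // request_txn/response_txn -> 0x22, range_end -> 0x42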
func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ (*ResponseOp_ResponseRange)(nil), (*ResponseOp_ResponsePut)(nil), (*ResponseOp_ResponseDeleteRange)(nil), + (*ResponseOp_ResponseTxn)(nil), } } @@ -848,6 +892,11 @@ func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { return err } + case *ResponseOp_ResponseTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseTxn); err != nil { + return err + } case nil: default: return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) @@ -882,6 +931,14 @@ func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buf err := b.DecodeMessage(msg) m.Response = &ResponseOp_ResponseDeleteRange{msg} return true, err + case 4: // response.response_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseTxn{msg} + return true, err default: return false, nil } @@ -906,6 +963,11 @@ func _ResponseOp_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *ResponseOp_ResponseTxn: + s := proto.Size(x.ResponseTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -926,6 +988,9 @@ type Compare struct { // *Compare_ModRevision // *Compare_Value TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + RangeEnd []byte `protobuf:"bytes,8,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } func (m *Compare) Reset() { *m = Compare{} } @@ -1013,6 +1078,13 @@ func (m *Compare) GetValue() []byte { return nil } +func (m *Compare) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ @@ -2085,6 +2157,39 @@ func (m *DefragmentResponse) GetHeader() *ResponseHeader { return nil } +type MoveLeaderRequest struct { + // targetID is the node ID for the new leader. 
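The new range_end field on Compare (documented in the hunk above) turns a single-key guard into a guard over every key in [key, range_end). Since this section only shows the generated struct, the sketch below stays at the protobuf level and is illustration only; pb aliases etcdserverpb.

// Illustration only: require every key in ["a", "b") to hold value "v".
cmp := &pb.Compare{
	Result:      pb.Compare_EQUAL,
	Target:      pb.Compare_VALUE,
	Key:         []byte("a"),
	RangeEnd:    []byte("b"), // new field 8
	TargetUnion: &pb.Compare_Value{Value: []byte("v")},
}
req := &pb.TxnRequest{Compare: []*pb.Compare{cmp}}
_ = req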
+ TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` +} + +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } + +func (m *MoveLeaderRequest) GetTargetID() uint64 { + if m != nil { + return m.TargetID + } + return 0 +} + +type MoveLeaderResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } + +func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + type AlarmRequest struct { // action is the kind of alarm request to issue. The action // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a @@ -2100,7 +2205,7 @@ type AlarmRequest struct { func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { if m != nil { @@ -2133,7 +2238,7 @@ type AlarmMember struct { func (m *AlarmMember) Reset() { *m = AlarmMember{} } func (m *AlarmMember) String() string { return proto.CompactTextString(m) } func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } func (m *AlarmMember) GetMemberID() uint64 { if m != nil { @@ -2158,7 +2263,7 @@ type AlarmResponse struct { func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } func (m *AlarmResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2180,7 +2285,7 @@ type StatusRequest struct { func (m *StatusRequest) Reset() { *m = StatusRequest{} } func (m *StatusRequest) String() string { return proto.CompactTextString(m) } func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } type StatusResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -2199,7 +2304,7 @@ type StatusResponse struct { func (m *StatusResponse) Reset() { *m = StatusResponse{} } func (m *StatusResponse) String() string { return proto.CompactTextString(m) } func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } func (m *StatusResponse) 
GetHeader() *ResponseHeader { if m != nil { @@ -2249,7 +2354,7 @@ type AuthEnableRequest struct { func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } type AuthDisableRequest struct { } @@ -2257,7 +2362,7 @@ type AuthDisableRequest struct { func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } type AuthenticateRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -2267,7 +2372,7 @@ type AuthenticateRequest struct { func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } func (m *AuthenticateRequest) GetName() string { if m != nil { @@ -2291,7 +2396,7 @@ type AuthUserAddRequest struct { func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } func (m *AuthUserAddRequest) GetName() string { if m != nil { @@ -2314,7 +2419,7 @@ type AuthUserGetRequest struct { func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } func (m *AuthUserGetRequest) GetName() string { if m != nil { @@ -2331,7 +2436,7 @@ type AuthUserDeleteRequest struct { func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } func (m *AuthUserDeleteRequest) GetName() string { if m != nil { @@ -2351,7 +2456,7 @@ func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePas func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordRequest) ProtoMessage() {} func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{52} + return fileDescriptorRpc, []int{54} } func (m 
*AuthUserChangePasswordRequest) GetName() string { @@ -2378,7 +2483,7 @@ type AuthUserGrantRoleRequest struct { func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } func (m *AuthUserGrantRoleRequest) GetUser() string { if m != nil { @@ -2402,7 +2507,7 @@ type AuthUserRevokeRoleRequest struct { func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } func (m *AuthUserRevokeRoleRequest) GetName() string { if m != nil { @@ -2426,7 +2531,7 @@ type AuthRoleAddRequest struct { func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } func (m *AuthRoleAddRequest) GetName() string { if m != nil { @@ -2442,7 +2547,7 @@ type AuthRoleGetRequest struct { func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } func (m *AuthRoleGetRequest) GetRole() string { if m != nil { @@ -2457,7 +2562,7 @@ type AuthUserListRequest struct { func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } type AuthRoleListRequest struct { } @@ -2465,7 +2570,7 @@ type AuthRoleListRequest struct { func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } type AuthRoleDeleteRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` @@ -2474,7 +2579,7 @@ type AuthRoleDeleteRequest struct { func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleDeleteRequest) ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) 
{ return fileDescriptorRpc, []int{59} } +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } func (m *AuthRoleDeleteRequest) GetRole() string { if m != nil { @@ -2494,7 +2599,7 @@ func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPer func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{60} + return fileDescriptorRpc, []int{62} } func (m *AuthRoleGrantPermissionRequest) GetName() string { @@ -2521,7 +2626,7 @@ func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokeP func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{61} + return fileDescriptorRpc, []int{63} } func (m *AuthRoleRevokePermissionRequest) GetRole() string { @@ -2552,7 +2657,7 @@ type AuthEnableResponse struct { func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } func (m *AuthEnableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2568,7 +2673,7 @@ type AuthDisableResponse struct { func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } func (m *AuthDisableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2586,7 +2691,7 @@ type AuthenticateResponse struct { func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } func (m *AuthenticateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2609,7 +2714,7 @@ type AuthUserAddResponse struct { func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2626,7 +2731,7 @@ type AuthUserGetResponse struct { func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return 
fileDescriptorRpc, []int{66} } +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} } func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2649,7 +2754,7 @@ type AuthUserDeleteResponse struct { func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2666,7 +2771,7 @@ func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePa func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordResponse) ProtoMessage() {} func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{68} + return fileDescriptorRpc, []int{70} } func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { @@ -2683,7 +2788,7 @@ type AuthUserGrantRoleResponse struct { func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2699,7 +2804,7 @@ type AuthUserRevokeRoleResponse struct { func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2715,7 +2820,7 @@ type AuthRoleAddResponse struct { func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2732,7 +2837,7 @@ type AuthRoleGetResponse struct { func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2756,7 +2861,7 @@ type AuthRoleListResponse struct { func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } func 
(m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2780,7 +2885,7 @@ type AuthUserListResponse struct { func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } func (m *AuthUserListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2803,7 +2908,7 @@ type AuthRoleDeleteResponse struct { func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} } func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2820,7 +2925,7 @@ func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPe func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{76} + return fileDescriptorRpc, []int{78} } func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { @@ -2838,7 +2943,7 @@ func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevoke func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{77} + return fileDescriptorRpc, []int{79} } func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { @@ -2890,6 +2995,8 @@ func init() { proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") + proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") + proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") @@ -3670,6 +3777,8 @@ type MaintenanceClient interface { Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) + // MoveLeader requests current leader node to transfer its leadership to transferee. 
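The interface comment above, together with the client stub, server handler, and service-descriptor entries added in the following hunks, exposes MoveLeader on the raw generated Maintenance service as well. A sketch of calling it without the clientv3 wrapper, assuming an existing *grpc.ClientConn named conn, a hypothetical newLeaderID, and pb aliasing etcdserverpb:

// Sketch only (assumed imports: context, fmt, grpc, etcdserverpb as pb).
mc := pb.NewMaintenanceClient(conn)
resp, err := mc.MoveLeader(context.Background(), &pb.MoveLeaderRequest{TargetID: newLeaderID})
if err != nil {
	// Members other than the leader reject the call; client code can match it
	// against the rpctypes.ErrNotLeader value added earlier in this patch.
	return err
}
fmt.Println("leadership moved; header revision:", resp.Header.Revision)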
+ MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) } type maintenanceClient struct { @@ -3748,6 +3857,15 @@ func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { return m, nil } +func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { + out := new(MoveLeaderResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for Maintenance service type MaintenanceServer interface { @@ -3763,6 +3881,8 @@ type MaintenanceServer interface { Hash(context.Context, *HashRequest) (*HashResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error + // MoveLeader requests current leader node to transfer its leadership to transferee. + MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) } func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { @@ -3862,6 +3982,24 @@ func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { return x.ServerStream.SendMsg(m) } +func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveLeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).MoveLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/MoveLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Maintenance_serviceDesc = grpc.ServiceDesc{ ServiceName: "etcdserverpb.Maintenance", HandlerType: (*MaintenanceServer)(nil), @@ -3882,6 +4020,10 @@ var _Maintenance_serviceDesc = grpc.ServiceDesc{ MethodName: "Hash", Handler: _Maintenance_Hash_Handler, }, + { + MethodName: "MoveLeader", + Handler: _Maintenance_MoveLeader_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -4932,6 +5074,20 @@ func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { } return i, nil } +func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) + n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} func (m *ResponseOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4948,11 +5104,11 @@ func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.Response != nil { - nn9, err := m.Response.MarshalTo(dAtA[i:]) + nn10, err := m.Response.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn9 + i += nn10 } return i, nil } @@ -4963,11 +5119,11 @@ func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) - n10, err := m.ResponseRange.MarshalTo(dAtA[i:]) + n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } return i, nil } @@ -4977,11 +5133,11 @@ func (m *ResponseOp_ResponsePut) MarshalTo(dAtA 
[]byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) - n11, err := m.ResponsePut.MarshalTo(dAtA[i:]) + n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } return i, nil } @@ -4991,11 +5147,25 @@ func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) - n12, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 + } + return i, nil +} +func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) + n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } return i, nil } @@ -5031,11 +5201,17 @@ func (m *Compare) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], m.Key) } if m.TargetUnion != nil { - nn13, err := m.TargetUnion.MarshalTo(dAtA[i:]) + nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn13 + i += nn15 + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } return i, nil } @@ -5144,11 +5320,11 @@ func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n14, err := m.Header.MarshalTo(dAtA[i:]) + n16, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n16 } if m.Succeeded { dAtA[i] = 0x10 @@ -5227,11 +5403,11 @@ func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n15, err := m.Header.MarshalTo(dAtA[i:]) + n17, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n17 } return i, nil } @@ -5273,11 +5449,11 @@ func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n16, err := m.Header.MarshalTo(dAtA[i:]) + n18, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n18 } if m.Hash != 0 { dAtA[i] = 0x10 @@ -5324,11 +5500,11 @@ func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n17, err := m.Header.MarshalTo(dAtA[i:]) + n19, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n19 } if m.RemainingBytes != 0 { dAtA[i] = 0x10 @@ -5360,11 +5536,11 @@ func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.RequestUnion != nil { - nn18, err := m.RequestUnion.MarshalTo(dAtA[i:]) + nn20, err := m.RequestUnion.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn18 + i += nn20 } return i, nil } @@ -5375,11 +5551,11 @@ func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) - n19, err := m.CreateRequest.MarshalTo(dAtA[i:]) + n21, err := m.CreateRequest.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n21 } return i, nil } @@ -5389,11 +5565,11 @@ func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, 
uint64(m.CancelRequest.Size())) - n20, err := m.CancelRequest.MarshalTo(dAtA[i:]) + n22, err := m.CancelRequest.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n22 } return i, nil } @@ -5440,21 +5616,21 @@ func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { i++ } if len(m.Filters) > 0 { - dAtA22 := make([]byte, len(m.Filters)*10) - var j21 int + dAtA24 := make([]byte, len(m.Filters)*10) + var j23 int for _, num := range m.Filters { for num >= 1<<7 { - dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) + dAtA24[j23] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j21++ + j23++ } - dAtA22[j21] = uint8(num) - j21++ + dAtA24[j23] = uint8(num) + j23++ } dAtA[i] = 0x2a i++ - i = encodeVarintRpc(dAtA, i, uint64(j21)) - i += copy(dAtA[i:], dAtA22[:j21]) + i = encodeVarintRpc(dAtA, i, uint64(j23)) + i += copy(dAtA[i:], dAtA24[:j23]) } if m.PrevKv { dAtA[i] = 0x30 @@ -5511,11 +5687,11 @@ func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n23, err := m.Header.MarshalTo(dAtA[i:]) + n25, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n25 } if m.WatchId != 0 { dAtA[i] = 0x10 @@ -5615,11 +5791,11 @@ func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n24, err := m.Header.MarshalTo(dAtA[i:]) + n26, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n26 } if m.ID != 0 { dAtA[i] = 0x10 @@ -5682,11 +5858,11 @@ func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n25, err := m.Header.MarshalTo(dAtA[i:]) + n27, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n27 } return i, nil } @@ -5733,11 +5909,11 @@ func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n26, err := m.Header.MarshalTo(dAtA[i:]) + n28, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n28 } if m.ID != 0 { dAtA[i] = 0x10 @@ -5804,11 +5980,11 @@ func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n27, err := m.Header.MarshalTo(dAtA[i:]) + n29, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n29 } if m.ID != 0 { dAtA[i] = 0x10 @@ -5947,21 +6123,21 @@ func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n28, err := m.Header.MarshalTo(dAtA[i:]) + n30, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n30 } if m.Member != nil { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) - n29, err := m.Member.MarshalTo(dAtA[i:]) + n31, err := m.Member.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n31 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -6020,11 +6196,11 @@ func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n30, err := m.Header.MarshalTo(dAtA[i:]) + n32, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n32 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -6098,11 +6274,11 @@ func 
(m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n31, err := m.Header.MarshalTo(dAtA[i:]) + n33, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n33 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -6156,11 +6332,11 @@ func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n32, err := m.Header.MarshalTo(dAtA[i:]) + n34, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n34 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -6214,11 +6390,62 @@ func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n33, err := m.Header.MarshalTo(dAtA[i:]) + n35, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n35 + } + return i, nil +} + +func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TargetID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) + } + return i, nil +} + +func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n36, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 } return i, nil } @@ -6303,11 +6530,11 @@ func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n34, err := m.Header.MarshalTo(dAtA[i:]) + n37, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n37 } if len(m.Alarms) > 0 { for _, msg := range m.Alarms { @@ -6361,11 +6588,11 @@ func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n35, err := m.Header.MarshalTo(dAtA[i:]) + n38, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n38 } if len(m.Version) > 0 { dAtA[i] = 0x12 @@ -6763,11 +6990,11 @@ func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) - n36, err := m.Perm.MarshalTo(dAtA[i:]) + n39, err := m.Perm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n39 } return i, nil } @@ -6827,11 +7054,11 @@ func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n37, err := m.Header.MarshalTo(dAtA[i:]) + n40, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n40 } return i, nil } @@ -6855,11 +7082,11 @@ func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n38, err := m.Header.MarshalTo(dAtA[i:]) + n41, err 
:= m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n41 } return i, nil } @@ -6883,11 +7110,11 @@ func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n39, err := m.Header.MarshalTo(dAtA[i:]) + n42, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n42 } if len(m.Token) > 0 { dAtA[i] = 0x12 @@ -6917,11 +7144,11 @@ func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n40, err := m.Header.MarshalTo(dAtA[i:]) + n43, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n43 } return i, nil } @@ -6945,11 +7172,11 @@ func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n41, err := m.Header.MarshalTo(dAtA[i:]) + n44, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n44 } if len(m.Roles) > 0 { for _, s := range m.Roles { @@ -6988,11 +7215,11 @@ func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n42, err := m.Header.MarshalTo(dAtA[i:]) + n45, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n45 } return i, nil } @@ -7016,11 +7243,11 @@ func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n43, err := m.Header.MarshalTo(dAtA[i:]) + n46, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n46 } return i, nil } @@ -7044,11 +7271,11 @@ func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n44, err := m.Header.MarshalTo(dAtA[i:]) + n47, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n47 } return i, nil } @@ -7072,11 +7299,11 @@ func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n45, err := m.Header.MarshalTo(dAtA[i:]) + n48, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n48 } return i, nil } @@ -7100,11 +7327,11 @@ func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n46, err := m.Header.MarshalTo(dAtA[i:]) + n49, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n49 } return i, nil } @@ -7128,11 +7355,11 @@ func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n47, err := m.Header.MarshalTo(dAtA[i:]) + n50, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n50 } if len(m.Perm) > 0 { for _, msg := range m.Perm { @@ -7168,11 +7395,11 @@ func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n48, err := m.Header.MarshalTo(dAtA[i:]) + n51, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n51 } if len(m.Roles) > 0 { for _, s := range m.Roles { @@ -7211,11 +7438,11 @@ func (m *AuthUserListResponse) MarshalTo(dAtA []byte) 
(int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n49, err := m.Header.MarshalTo(dAtA[i:]) + n52, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n52 } if len(m.Users) > 0 { for _, s := range m.Users { @@ -7254,11 +7481,11 @@ func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n50, err := m.Header.MarshalTo(dAtA[i:]) + n53, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n53 } return i, nil } @@ -7282,11 +7509,11 @@ func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n51, err := m.Header.MarshalTo(dAtA[i:]) + n54, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n54 } return i, nil } @@ -7310,11 +7537,11 @@ func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n52, err := m.Header.MarshalTo(dAtA[i:]) + n55, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n55 } return i, nil } @@ -7545,6 +7772,15 @@ func (m *RequestOp_RequestDeleteRange) Size() (n int) { } return n } +func (m *RequestOp_RequestTxn) Size() (n int) { + var l int + _ = l + if m.RequestTxn != nil { + l = m.RequestTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} func (m *ResponseOp) Size() (n int) { var l int _ = l @@ -7581,6 +7817,15 @@ func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { } return n } +func (m *ResponseOp_ResponseTxn) Size() (n int) { + var l int + _ = l + if m.ResponseTxn != nil { + l = m.ResponseTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} func (m *Compare) Size() (n int) { var l int _ = l @@ -7597,6 +7842,10 @@ func (m *Compare) Size() (n int) { if m.TargetUnion != nil { n += m.TargetUnion.Size() } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -8096,6 +8345,25 @@ func (m *DefragmentResponse) Size() (n int) { return n } +func (m *MoveLeaderRequest) Size() (n int) { + var l int + _ = l + if m.TargetID != 0 { + n += 1 + sovRpc(uint64(m.TargetID)) + } + return n +} + +func (m *MoveLeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + func (m *AlarmRequest) Size() (n int) { var l int _ = l @@ -9848,6 +10116,38 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } m.Request = &RequestOp_RequestDeleteRange{v} iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestTxn{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -9994,6 +10294,38 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } m.Response = &ResponseOp_ResponseDeleteRange{v} 
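Every new Unmarshal branch in this hunk (RequestTxn, ResponseTxn, RangeEnd, MoveLeaderRequest) reads lengths and integers with the same inlined base-128 varint loop. For readers skimming the generated code, an equivalent standalone version, written here for illustration only:

// decodeVarint mirrors the generated loop: each byte contributes its low
// 7 bits, least-significant group first; a byte below 0x80 ends the value.
// (The generated code additionally caps shift at 64 via ErrIntOverflowRpc.)
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); n < len(b); shift += 7 {
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, io.ErrUnexpectedEOF // ran out of bytes mid-varint
}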
iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseTxn{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -10203,6 +10535,37 @@ func (m *Compare) Unmarshal(dAtA []byte) error { copy(v, dAtA[iNdEx:postIndex]) m.TargetUnion = &Compare_Value{v} iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -13557,6 +13920,158 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) + } + m.TargetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AlarmRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -17071,221 +17586,227 @@ var ( func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 3450 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3b, 0x5b, 0x6f, 0x1b, 0xc7, - 0xb9, 0x5a, 0x5e, 0xc5, 0x8f, 0x17, 0xd1, 0x23, 0xd9, 0xa6, 0x68, 0x5b, 0x96, 0xc7, 0x37, 0xd9, - 0x4e, 0xa4, 0x44, 0xc9, 0x39, 0x0f, 0x3e, 0x41, 0x70, 0x64, 0x89, 0xb1, 0x74, 0x24, 0x4b, 0xce, - 0x4a, 0x76, 0x72, 0x80, 0xa0, 0xc4, 0x8a, 0x1c, 0x53, 0x0b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96, - 0xd2, 0x14, 0x28, 0xd2, 0x04, 0x45, 0x0b, 0xf4, 0xa5, 0x79, 0xe8, 0xed, 0xb1, 0x28, 0x8a, 0xfc, - 0x80, 0xbe, 0xf5, 0x07, 0x14, 0x7d, 0x69, 0x81, 0xfe, 0x81, 0x22, 0xed, 0x63, 0xdf, 0xfb, 0x54, - 0xb4, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xca, 0x26, 0x2f, 0xd6, 0xce, 0x37, 0xdf, 0x7c, - 0xb7, 0x99, 0xef, 0x32, 0xdf, 0xd0, 0x50, 0x70, 0xfa, 0xad, 0xe5, 0xbe, 0x63, 0x7b, 0x36, 0x2a, - 0x11, 0xaf, 0xd5, 0x76, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb0, 0x3e, 0xd7, 0xb1, 0x3b, 0x36, 0x9b, - 0x58, 0xa1, 0x5f, 0x1c, 0xa7, 0x3e, 0x4f, 0x71, 0x56, 0x7a, 0xc3, 0x56, 0x8b, 0xfd, 0xd3, 0x3f, - 0x5c, 0x39, 0x1e, 0x8a, 0xa9, 0x2b, 0x6c, 0xca, 0x18, 0x78, 0x47, 0xec, 0x9f, 0xfe, 0x21, 0xfb, - 0x23, 0x26, 0xaf, 0x76, 0x6c, 0xbb, 0xd3, 0x25, 0x2b, 0x46, 0xdf, 0x5c, 0x31, 0x2c, 0xcb, 0xf6, - 0x0c, 0xcf, 0xb4, 0x2d, 0x97, 0xcf, 0xe2, 0xcf, 0x34, 0xa8, 0xe8, 0xc4, 0xed, 0xdb, 0x96, 0x4b, - 0x36, 0x89, 0xd1, 0x26, 0x0e, 0xba, 0x06, 0xd0, 0xea, 0x0e, 0x5c, 0x8f, 0x38, 0x4d, 0xb3, 0x5d, - 0xd3, 0x16, 0xb5, 0xa5, 0x8c, 0x5e, 0x10, 0x90, 0xad, 0x36, 0xba, 0x02, 0x85, 0x1e, 0xe9, 0x1d, - 0xf2, 0xd9, 0x14, 0x9b, 0x9d, 0xe6, 0x80, 0xad, 0x36, 0xaa, 0xc3, 0xb4, 0x43, 0x86, 0xa6, 0x6b, - 0xda, 0x56, 0x2d, 0xbd, 0xa8, 0x2d, 0xa5, 0x75, 0x7f, 0x4c, 0x17, 0x3a, 0xc6, 0x0b, 0xaf, 0xe9, - 0x11, 0xa7, 0x57, 0xcb, 0xf0, 0x85, 0x14, 0x70, 0x40, 0x9c, 0x1e, 0xfe, 0x34, 0x0b, 0x25, 0xdd, - 0xb0, 0x3a, 0x44, 0x27, 0x1f, 0x0e, 0x88, 0xeb, 0xa1, 0x2a, 0xa4, 0x8f, 0xc9, 0x29, 0x63, 0x5f, - 0xd2, 0xe9, 0x27, 0x5f, 0x6f, 0x75, 0x48, 0x93, 0x58, 0x9c, 0x71, 0x89, 0xae, 0xb7, 0x3a, 0xa4, - 0x61, 0xb5, 0xd1, 0x1c, 0x64, 0xbb, 0x66, 0xcf, 0xf4, 0x04, 0x57, 0x3e, 0x08, 0x89, 0x93, 0x89, - 0x88, 0xb3, 
0x0e, 0xe0, 0xda, 0x8e, 0xd7, 0xb4, 0x9d, 0x36, 0x71, 0x6a, 0xd9, 0x45, 0x6d, 0xa9, - 0xb2, 0x7a, 0x6b, 0x59, 0xdd, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xdb, 0xf1, 0xf6, 0x28, 0xae, - 0x5e, 0x70, 0xe5, 0x27, 0x7a, 0x07, 0x8a, 0x8c, 0x88, 0x67, 0x38, 0x1d, 0xe2, 0xd5, 0x72, 0x8c, - 0xca, 0xed, 0x33, 0xa8, 0x1c, 0x30, 0x64, 0x9d, 0xb1, 0xe7, 0xdf, 0x08, 0x43, 0xc9, 0x25, 0x8e, - 0x69, 0x74, 0xcd, 0x8f, 0x8c, 0xc3, 0x2e, 0xa9, 0xe5, 0x17, 0xb5, 0xa5, 0x69, 0x3d, 0x04, 0xa3, - 0xfa, 0x1f, 0x93, 0x53, 0xb7, 0x69, 0x5b, 0xdd, 0xd3, 0xda, 0x34, 0x43, 0x98, 0xa6, 0x80, 0x3d, - 0xab, 0x7b, 0xca, 0x36, 0xcd, 0x1e, 0x58, 0x1e, 0x9f, 0x2d, 0xb0, 0xd9, 0x02, 0x83, 0xb0, 0xe9, - 0x25, 0xa8, 0xf6, 0x4c, 0xab, 0xd9, 0xb3, 0xdb, 0x4d, 0xdf, 0x20, 0xc0, 0x0c, 0x52, 0xe9, 0x99, - 0xd6, 0x13, 0xbb, 0xad, 0x4b, 0xb3, 0x50, 0x4c, 0xe3, 0x24, 0x8c, 0x59, 0x14, 0x98, 0xc6, 0x89, - 0x8a, 0xb9, 0x0c, 0xb3, 0x94, 0x66, 0xcb, 0x21, 0x86, 0x47, 0x02, 0xe4, 0x12, 0x43, 0xbe, 0xd0, - 0x33, 0xad, 0x75, 0x36, 0x13, 0xc2, 0x37, 0x4e, 0x46, 0xf0, 0xcb, 0x02, 0xdf, 0x38, 0x09, 0xe3, - 0xe3, 0x65, 0x28, 0xf8, 0x36, 0x47, 0xd3, 0x90, 0xd9, 0xdd, 0xdb, 0x6d, 0x54, 0xa7, 0x10, 0x40, - 0x6e, 0x6d, 0x7f, 0xbd, 0xb1, 0xbb, 0x51, 0xd5, 0x50, 0x11, 0xf2, 0x1b, 0x0d, 0x3e, 0x48, 0xe1, - 0x47, 0x00, 0x81, 0x75, 0x51, 0x1e, 0xd2, 0xdb, 0x8d, 0xff, 0xaf, 0x4e, 0x51, 0x9c, 0xe7, 0x0d, - 0x7d, 0x7f, 0x6b, 0x6f, 0xb7, 0xaa, 0xd1, 0xc5, 0xeb, 0x7a, 0x63, 0xed, 0xa0, 0x51, 0x4d, 0x51, - 0x8c, 0x27, 0x7b, 0x1b, 0xd5, 0x34, 0x2a, 0x40, 0xf6, 0xf9, 0xda, 0xce, 0xb3, 0x46, 0x35, 0x83, - 0x3f, 0xd7, 0xa0, 0x2c, 0xf6, 0x8b, 0xfb, 0x04, 0x7a, 0x13, 0x72, 0x47, 0xcc, 0x2f, 0xd8, 0x51, - 0x2c, 0xae, 0x5e, 0x8d, 0x6c, 0x6e, 0xc8, 0x77, 0x74, 0x81, 0x8b, 0x30, 0xa4, 0x8f, 0x87, 0x6e, - 0x2d, 0xb5, 0x98, 0x5e, 0x2a, 0xae, 0x56, 0x97, 0xb9, 0xc3, 0x2e, 0x6f, 0x93, 0xd3, 0xe7, 0x46, - 0x77, 0x40, 0x74, 0x3a, 0x89, 0x10, 0x64, 0x7a, 0xb6, 0x43, 0xd8, 0x89, 0x9d, 0xd6, 0xd9, 0x37, - 0x3d, 0xc6, 0x6c, 0xd3, 0xc4, 0x69, 0xe5, 0x03, 0xfc, 0x85, 0x06, 0xf0, 0x74, 0xe0, 0x25, 0xbb, - 0xc6, 0x1c, 0x64, 0x87, 0x94, 0xb0, 0x70, 0x0b, 0x3e, 0x60, 0x3e, 0x41, 0x0c, 0x97, 0xf8, 0x3e, - 0x41, 0x07, 0xe8, 0x32, 0xe4, 0xfb, 0x0e, 0x19, 0x36, 0x8f, 0x87, 0x8c, 0xc9, 0xb4, 0x9e, 0xa3, - 0xc3, 0xed, 0x21, 0xba, 0x01, 0x25, 0xb3, 0x63, 0xd9, 0x0e, 0x69, 0x72, 0x5a, 0x59, 0x36, 0x5b, - 0xe4, 0x30, 0x26, 0xb7, 0x82, 0xc2, 0x09, 0xe7, 0x54, 0x94, 0x1d, 0x0a, 0xc2, 0x16, 0x14, 0x99, - 0xa8, 0x13, 0x99, 0xef, 0x5e, 0x20, 0x63, 0x8a, 0x2d, 0x1b, 0x35, 0xa1, 0x90, 0x1a, 0x7f, 0x00, - 0x68, 0x83, 0x74, 0x89, 0x47, 0x26, 0x89, 0x1e, 0x8a, 0x4d, 0xd2, 0xaa, 0x4d, 0xf0, 0x8f, 0x35, - 0x98, 0x0d, 0x91, 0x9f, 0x48, 0xad, 0x1a, 0xe4, 0xdb, 0x8c, 0x18, 0x97, 0x20, 0xad, 0xcb, 0x21, - 0x7a, 0x00, 0xd3, 0x42, 0x00, 0xb7, 0x96, 0x4e, 0x38, 0x34, 0x79, 0x2e, 0x93, 0x8b, 0xff, 0xa6, - 0x41, 0x41, 0x28, 0xba, 0xd7, 0x47, 0x6b, 0x50, 0x76, 0xf8, 0xa0, 0xc9, 0xf4, 0x11, 0x12, 0xd5, - 0x93, 0x83, 0xd0, 0xe6, 0x94, 0x5e, 0x12, 0x4b, 0x18, 0x18, 0xfd, 0x0f, 0x14, 0x25, 0x89, 0xfe, - 0xc0, 0x13, 0x26, 0xaf, 0x85, 0x09, 0x04, 0xe7, 0x6f, 0x73, 0x4a, 0x07, 0x81, 0xfe, 0x74, 0xe0, - 0xa1, 0x03, 0x98, 0x93, 0x8b, 0xb9, 0x36, 0x42, 0x8c, 0x34, 0xa3, 0xb2, 0x18, 0xa6, 0x32, 0xba, - 0x55, 0x9b, 0x53, 0x3a, 0x12, 0xeb, 0x95, 0xc9, 0x47, 0x05, 0xc8, 0x0b, 0x28, 0xfe, 0xbb, 0x06, - 0x20, 0x0d, 0xba, 0xd7, 0x47, 0x1b, 0x50, 0x71, 0xc4, 0x28, 0xa4, 0xf0, 0x95, 0x58, 0x85, 0xc5, - 0x3e, 0x4c, 0xe9, 0x65, 0xb9, 0x88, 0xab, 0xfc, 0x36, 0x94, 0x7c, 0x2a, 0x81, 0xce, 0xf3, 0x31, - 0x3a, 0xfb, 0x14, 0x8a, 0x72, 0x01, 
0xd5, 0xfa, 0x3d, 0xb8, 0xe8, 0xaf, 0x8f, 0x51, 0xfb, 0xc6, - 0x18, 0xb5, 0x7d, 0x82, 0xb3, 0x92, 0x82, 0xaa, 0x38, 0xd0, 0x94, 0xc5, 0xc1, 0xf8, 0x8b, 0x34, - 0xe4, 0xd7, 0xed, 0x5e, 0xdf, 0x70, 0xe8, 0x1e, 0xe5, 0x1c, 0xe2, 0x0e, 0xba, 0x1e, 0x53, 0xb7, - 0xb2, 0x7a, 0x33, 0xcc, 0x41, 0xa0, 0xc9, 0xbf, 0x3a, 0x43, 0xd5, 0xc5, 0x12, 0xba, 0x58, 0x64, - 0xa8, 0xd4, 0x39, 0x16, 0x8b, 0xfc, 0x24, 0x96, 0x48, 0x5f, 0x4a, 0x07, 0xbe, 0x54, 0x87, 0xfc, - 0x90, 0x38, 0x41, 0x56, 0xdd, 0x9c, 0xd2, 0x25, 0x00, 0xdd, 0x83, 0x99, 0x68, 0x84, 0xcf, 0x0a, - 0x9c, 0x4a, 0x2b, 0x9c, 0x10, 0x6e, 0x42, 0x29, 0x94, 0x66, 0x72, 0x02, 0xaf, 0xd8, 0x53, 0xb2, - 0xcc, 0x25, 0x19, 0xda, 0x68, 0x4a, 0x2c, 0x6d, 0x4e, 0x89, 0xe0, 0x86, 0xff, 0x17, 0xca, 0x21, - 0x5d, 0x69, 0x14, 0x6f, 0xbc, 0xfb, 0x6c, 0x6d, 0x87, 0x87, 0xfc, 0xc7, 0x2c, 0xca, 0xeb, 0x55, - 0x8d, 0x66, 0x8e, 0x9d, 0xc6, 0xfe, 0x7e, 0x35, 0x85, 0xca, 0x50, 0xd8, 0xdd, 0x3b, 0x68, 0x72, - 0xac, 0x34, 0x7e, 0xcb, 0xa7, 0x20, 0x52, 0x86, 0x92, 0x29, 0xa6, 0x94, 0x4c, 0xa1, 0xc9, 0x4c, - 0x91, 0x0a, 0x32, 0x45, 0xfa, 0x51, 0x05, 0x4a, 0xdc, 0x3e, 0xcd, 0x81, 0x45, 0xb3, 0xd5, 0x2f, - 0x35, 0x80, 0x83, 0x13, 0x4b, 0x06, 0xa0, 0x15, 0xc8, 0xb7, 0x38, 0xf1, 0x9a, 0xc6, 0xfc, 0xf9, - 0x62, 0xac, 0xc9, 0x75, 0x89, 0x85, 0x5e, 0x87, 0xbc, 0x3b, 0x68, 0xb5, 0x88, 0x2b, 0xb3, 0xc6, - 0xe5, 0x68, 0x48, 0x11, 0x0e, 0xaf, 0x4b, 0x3c, 0xba, 0xe4, 0x85, 0x61, 0x76, 0x07, 0x2c, 0x87, - 0x8c, 0x5f, 0x22, 0xf0, 0xf0, 0xcf, 0x34, 0x28, 0x32, 0x29, 0x27, 0x8a, 0x63, 0x57, 0xa1, 0xc0, - 0x64, 0x20, 0x6d, 0x11, 0xc9, 0xa6, 0xf5, 0x00, 0x80, 0xfe, 0x1b, 0x0a, 0xf2, 0x04, 0xcb, 0x60, - 0x56, 0x8b, 0x27, 0xbb, 0xd7, 0xd7, 0x03, 0x54, 0xbc, 0x0d, 0x17, 0x98, 0x55, 0x5a, 0xb4, 0x3e, - 0x95, 0x76, 0x54, 0x2b, 0x38, 0x2d, 0x52, 0xc1, 0xd5, 0x61, 0xba, 0x7f, 0x74, 0xea, 0x9a, 0x2d, - 0xa3, 0x2b, 0xa4, 0xf0, 0xc7, 0xf8, 0xff, 0x00, 0xa9, 0xc4, 0x26, 0x51, 0x17, 0x97, 0xa1, 0xb8, - 0x69, 0xb8, 0x47, 0x42, 0x24, 0xfc, 0x3e, 0x94, 0xf8, 0x70, 0x22, 0x1b, 0x22, 0xc8, 0x1c, 0x19, - 0xee, 0x11, 0x13, 0xbc, 0xac, 0xb3, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x2d, 0xa3, 0xef, 0x1e, 0xd9, - 0x32, 0xd6, 0xd2, 0xfa, 0xbc, 0x1a, 0xc0, 0x26, 0xe2, 0x78, 0x17, 0x66, 0x1c, 0xd2, 0x33, 0x4c, - 0xcb, 0xb4, 0x3a, 0xcd, 0xc3, 0x53, 0x8f, 0xb8, 0xa2, 0x7c, 0xaf, 0xf8, 0xe0, 0x47, 0x14, 0x4a, - 0x45, 0x3b, 0xec, 0xda, 0x87, 0xc2, 0xe3, 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0xa5, 0xf7, 0x0c, 0xaf, - 0x25, 0xad, 0x80, 0xb6, 0xa0, 0xe2, 0xfb, 0x39, 0x83, 0x08, 0x59, 0x22, 0x01, 0x9f, 0xad, 0x91, - 0x85, 0x9d, 0x0c, 0xf8, 0xe5, 0x96, 0x0a, 0x60, 0xa4, 0x0c, 0xab, 0x45, 0xba, 0x3e, 0xa9, 0x54, - 0x32, 0x29, 0x86, 0xa8, 0x92, 0x52, 0x01, 0x8f, 0x66, 0x82, 0x64, 0xc8, 0xdd, 0xf2, 0xe7, 0x29, - 0x40, 0xa3, 0x32, 0x7c, 0xd5, 0xfa, 0xe0, 0x36, 0x54, 0x5c, 0xcf, 0x70, 0xbc, 0x66, 0xe4, 0x72, - 0x53, 0x66, 0x50, 0x3f, 0x56, 0xdd, 0x85, 0x99, 0xbe, 0x63, 0x77, 0x1c, 0xe2, 0xba, 0x4d, 0xcb, - 0xf6, 0xcc, 0x17, 0xa7, 0xa2, 0xc4, 0xaa, 0x48, 0xf0, 0x2e, 0x83, 0xa2, 0x06, 0xe4, 0x5f, 0x98, - 0x5d, 0x8f, 0x38, 0x6e, 0x2d, 0xbb, 0x98, 0x5e, 0xaa, 0xac, 0x3e, 0x38, 0xcb, 0x6a, 0xcb, 0xef, - 0x30, 0xfc, 0x83, 0xd3, 0x3e, 0xd1, 0xe5, 0x5a, 0xb5, 0x6c, 0xc9, 0x85, 0xca, 0x96, 0xdb, 0x00, - 0x01, 0x3e, 0x8d, 0x5a, 0xbb, 0x7b, 0x4f, 0x9f, 0x1d, 0x54, 0xa7, 0x50, 0x09, 0xa6, 0x77, 0xf7, - 0x36, 0x1a, 0x3b, 0x0d, 0x1a, 0xd7, 0xf0, 0x8a, 0xb4, 0x8d, 0x6a, 0x43, 0x34, 0x0f, 0xd3, 0x2f, - 0x29, 0x54, 0xde, 0xfe, 0xd2, 0x7a, 0x9e, 0x8d, 0xb7, 0xda, 0xf8, 0x47, 0x29, 0x28, 0x8b, 0x53, - 0x30, 0xd1, 0x51, 0x54, 0x59, 0xa4, 0x42, 0x2c, 0x68, 0x8d, 
0xc4, 0x4f, 0x47, 0x5b, 0x94, 0x62, - 0x72, 0x48, 0xdd, 0x9d, 0x6f, 0x36, 0x69, 0x0b, 0xb3, 0xfa, 0x63, 0x74, 0x0f, 0xaa, 0x2d, 0xee, - 0xee, 0x91, 0xb4, 0xa3, 0xcf, 0x08, 0xb8, 0x92, 0x75, 0xca, 0xfe, 0x69, 0x33, 0x5c, 0x91, 0x76, - 0x0a, 0x7a, 0x49, 0x1e, 0x24, 0x0a, 0x43, 0xb7, 0x21, 0x47, 0x86, 0xc4, 0xf2, 0xdc, 0x5a, 0x91, - 0x05, 0xb0, 0xb2, 0xac, 0xc6, 0x1a, 0x14, 0xaa, 0x8b, 0x49, 0xfc, 0x5f, 0x70, 0x81, 0x55, 0xbd, - 0x8f, 0x1d, 0xc3, 0x52, 0xcb, 0xf3, 0x83, 0x83, 0x1d, 0x61, 0x3a, 0xfa, 0x89, 0x2a, 0x90, 0xda, - 0xda, 0x10, 0x8a, 0xa6, 0xb6, 0x36, 0xf0, 0x27, 0x1a, 0x20, 0x75, 0xdd, 0x44, 0xb6, 0x8c, 0x10, - 0x97, 0xec, 0xd3, 0x01, 0xfb, 0x39, 0xc8, 0x12, 0xc7, 0xb1, 0x1d, 0x66, 0xb5, 0x82, 0xce, 0x07, - 0xf8, 0x96, 0x90, 0x41, 0x27, 0x43, 0xfb, 0xd8, 0x77, 0x0c, 0x4e, 0x4d, 0xf3, 0x45, 0xdd, 0x86, - 0xd9, 0x10, 0xd6, 0x44, 0x81, 0xf4, 0x2e, 0x5c, 0x64, 0xc4, 0xb6, 0x09, 0xe9, 0xaf, 0x75, 0xcd, - 0x61, 0x22, 0xd7, 0x3e, 0x5c, 0x8a, 0x22, 0x7e, 0xbd, 0x36, 0xc2, 0x6f, 0x09, 0x8e, 0x07, 0x66, - 0x8f, 0x1c, 0xd8, 0x3b, 0xc9, 0xb2, 0xd1, 0xe8, 0x48, 0x6f, 0xdd, 0x22, 0xe3, 0xb0, 0x6f, 0xfc, - 0x2b, 0x0d, 0x2e, 0x8f, 0x2c, 0xff, 0x9a, 0x77, 0x75, 0x01, 0xa0, 0x43, 0x8f, 0x0f, 0x69, 0xd3, - 0x09, 0x7e, 0x5f, 0x54, 0x20, 0xbe, 0x9c, 0x34, 0xc0, 0x94, 0x84, 0x9c, 0x47, 0x90, 0x7b, 0xc2, - 0x5a, 0x35, 0x8a, 0x56, 0x19, 0xa9, 0x95, 0x65, 0xf4, 0xf8, 0x05, 0xb2, 0xa0, 0xb3, 0x6f, 0x96, - 0x5f, 0x09, 0x71, 0x9e, 0xe9, 0x3b, 0x3c, 0x8f, 0x17, 0x74, 0x7f, 0x4c, 0xb9, 0xb7, 0xba, 0x26, - 0xb1, 0x3c, 0x36, 0x9b, 0x61, 0xb3, 0x0a, 0x04, 0x2f, 0x43, 0x95, 0x73, 0x5a, 0x6b, 0xb7, 0x95, - 0x5c, 0xee, 0xd3, 0xd3, 0xc2, 0xf4, 0xf0, 0xaf, 0x35, 0xb8, 0xa0, 0x2c, 0x98, 0xc8, 0x76, 0xaf, - 0x40, 0x8e, 0x37, 0xa4, 0x44, 0x1e, 0x99, 0x0b, 0xaf, 0xe2, 0x6c, 0x74, 0x81, 0x83, 0x96, 0x21, - 0xcf, 0xbf, 0x64, 0xb1, 0x12, 0x8f, 0x2e, 0x91, 0xf0, 0x6d, 0x98, 0x15, 0x20, 0xd2, 0xb3, 0xe3, - 0x8e, 0x09, 0x33, 0x28, 0xfe, 0x18, 0xe6, 0xc2, 0x68, 0x13, 0xa9, 0xa4, 0x08, 0x99, 0x3a, 0x8f, - 0x90, 0x6b, 0x52, 0xc8, 0x67, 0xfd, 0xb6, 0x92, 0xf6, 0xa2, 0xbb, 0xae, 0xee, 0x48, 0x2a, 0xb2, - 0x23, 0xbe, 0x02, 0x92, 0xc4, 0x37, 0xaa, 0xc0, 0xac, 0x3c, 0x0e, 0x3b, 0xa6, 0xeb, 0x17, 0x43, - 0x1f, 0x01, 0x52, 0x81, 0xdf, 0xb4, 0x40, 0x1b, 0xe4, 0x85, 0x63, 0x74, 0x7a, 0xc4, 0x0f, 0xf5, - 0xb4, 0xca, 0x54, 0x81, 0x13, 0x05, 0xc7, 0x3f, 0x68, 0x50, 0x5a, 0xeb, 0x1a, 0x4e, 0x4f, 0x6e, - 0xd6, 0xdb, 0x90, 0xe3, 0xe5, 0xab, 0xb8, 0xf1, 0xdd, 0x09, 0x93, 0x51, 0x71, 0xf9, 0x60, 0x8d, - 0x17, 0xbb, 0x62, 0x15, 0xdd, 0x5c, 0xd1, 0x97, 0xdd, 0x88, 0xf4, 0x69, 0x37, 0xd0, 0xab, 0x90, - 0x35, 0xe8, 0x12, 0x16, 0x50, 0x2a, 0xd1, 0x8b, 0x03, 0xa3, 0xc6, 0x4a, 0x0d, 0x8e, 0x85, 0xdf, - 0x84, 0xa2, 0xc2, 0x81, 0xde, 0x87, 0x1e, 0x37, 0x44, 0x39, 0xb1, 0xb6, 0x7e, 0xb0, 0xf5, 0x9c, - 0x5f, 0x93, 0x2a, 0x00, 0x1b, 0x0d, 0x7f, 0x9c, 0xc2, 0xef, 0x8b, 0x55, 0x22, 0xe4, 0xa8, 0xf2, - 0x68, 0x49, 0xf2, 0xa4, 0xce, 0x25, 0xcf, 0x09, 0x94, 0x85, 0xfa, 0x13, 0x9d, 0x81, 0xd7, 0x21, - 0xc7, 0xe8, 0xc9, 0x23, 0x30, 0x1f, 0xc3, 0x56, 0x46, 0x0b, 0x8e, 0x88, 0x67, 0xa0, 0xbc, 0xef, - 0x19, 0xde, 0xc0, 0x95, 0x47, 0xe0, 0xf7, 0x1a, 0x54, 0x24, 0x64, 0xd2, 0xe6, 0x90, 0xbc, 0x54, - 0xf3, 0x20, 0xec, 0x5f, 0xa9, 0x2f, 0x41, 0xae, 0x7d, 0xb8, 0x6f, 0x7e, 0x24, 0x1b, 0x79, 0x62, - 0x44, 0xe1, 0x5d, 0xce, 0x87, 0x77, 0xd3, 0xc5, 0x88, 0x5e, 0xcf, 0x1c, 0xe3, 0x85, 0xb7, 0x65, - 0xb5, 0xc9, 0x09, 0xab, 0x82, 0x32, 0x7a, 0x00, 0x60, 0x37, 0x2a, 0xd1, 0x75, 0x67, 0xa5, 0x8f, - 0xda, 0x85, 0x9f, 0x85, 0x0b, 0x6b, 0x03, 0xef, 0xa8, 0x61, 0x19, 0x87, 0x5d, 0x19, 
0x34, 0xf0, - 0x1c, 0x20, 0x0a, 0xdc, 0x30, 0x5d, 0x15, 0xda, 0x80, 0x59, 0x0a, 0x25, 0x96, 0x67, 0xb6, 0x94, - 0x08, 0x23, 0xf3, 0x88, 0x16, 0xc9, 0x23, 0x86, 0xeb, 0xbe, 0xb4, 0x9d, 0xb6, 0x50, 0xcd, 0x1f, - 0xe3, 0x0d, 0x4e, 0xfc, 0x99, 0x1b, 0xca, 0x14, 0x5f, 0x95, 0xca, 0x52, 0x40, 0xe5, 0x31, 0xf1, - 0xc6, 0x50, 0xc1, 0x0f, 0xe0, 0xa2, 0xc4, 0x14, 0x5d, 0x97, 0x31, 0xc8, 0x7b, 0x70, 0x4d, 0x22, - 0xaf, 0x1f, 0xd1, 0xbb, 0xc0, 0x53, 0xc1, 0xf0, 0xdf, 0x95, 0xf3, 0x11, 0xd4, 0x7c, 0x39, 0x59, - 0xe9, 0x67, 0x77, 0x55, 0x01, 0x06, 0xae, 0x38, 0x33, 0x05, 0x9d, 0x7d, 0x53, 0x98, 0x63, 0x77, - 0xfd, 0xac, 0x4c, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1a, 0xa2, 0x28, 0x0b, 0x13, 0x19, 0x11, 0x28, - 0x8e, 0x88, 0x30, 0x18, 0x5d, 0x3a, 0xde, 0xec, 0x2a, 0x66, 0xd8, 0xb4, 0x8c, 0xa6, 0xa6, 0xd0, - 0xbc, 0xc8, 0x4f, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0x02, 0x4c, 0x09, 0xa8, 0x60, 0xb1, 0x11, 0x14, - 0x3c, 0xb2, 0x11, 0x23, 0xa4, 0x3f, 0x80, 0x05, 0x5f, 0x08, 0x6a, 0xb7, 0xa7, 0xc4, 0xe9, 0x99, - 0xae, 0xab, 0xf4, 0x09, 0xe2, 0x14, 0xbf, 0x03, 0x99, 0x3e, 0x11, 0x31, 0xa5, 0xb8, 0x8a, 0x96, - 0xf9, 0xdb, 0xd8, 0xb2, 0xb2, 0x98, 0xcd, 0xe3, 0x36, 0x5c, 0x97, 0xd4, 0xb9, 0x45, 0x63, 0xc9, - 0x47, 0x85, 0x92, 0x77, 0x48, 0x6e, 0xd6, 0xd1, 0x3b, 0x64, 0x9a, 0xef, 0xbd, 0xbc, 0x43, 0xd2, - 0x5c, 0xa1, 0xfa, 0xd6, 0x44, 0xb9, 0x62, 0x9b, 0xdb, 0xd4, 0x77, 0xc9, 0x89, 0x88, 0x1d, 0xc2, - 0x5c, 0xd8, 0x93, 0x27, 0x0a, 0x63, 0x73, 0x90, 0xf5, 0xec, 0x63, 0x22, 0x83, 0x18, 0x1f, 0x48, - 0x81, 0x7d, 0x37, 0x9f, 0x48, 0x60, 0x23, 0x20, 0xc6, 0x8e, 0xe4, 0xa4, 0xf2, 0xd2, 0xdd, 0x94, - 0xf5, 0x0f, 0x1f, 0xe0, 0x5d, 0xb8, 0x14, 0x0d, 0x13, 0x13, 0x89, 0xfc, 0x9c, 0x1f, 0xe0, 0xb8, - 0x48, 0x32, 0x11, 0xdd, 0x77, 0x83, 0x60, 0xa0, 0x04, 0x94, 0x89, 0x48, 0xea, 0x50, 0x8f, 0x8b, - 0x2f, 0xff, 0x89, 0xf3, 0xea, 0x87, 0x9b, 0x89, 0x88, 0xb9, 0x01, 0xb1, 0xc9, 0xb7, 0x3f, 0x88, - 0x11, 0xe9, 0xb1, 0x31, 0x42, 0x38, 0x49, 0x10, 0xc5, 0xbe, 0x86, 0x43, 0x27, 0x78, 0x04, 0x01, - 0x74, 0x52, 0x1e, 0x34, 0x87, 0xf8, 0x3c, 0xd8, 0x40, 0x1e, 0x6c, 0x35, 0xec, 0x4e, 0xb4, 0x19, - 0xef, 0x05, 0xb1, 0x73, 0x24, 0x32, 0x4f, 0x44, 0xf8, 0x7d, 0x58, 0x4c, 0x0e, 0xca, 0x93, 0x50, - 0xbe, 0x8f, 0xa1, 0xe0, 0x17, 0x94, 0xca, 0xbb, 0x72, 0x11, 0xf2, 0xbb, 0x7b, 0xfb, 0x4f, 0xd7, - 0xd6, 0x1b, 0x55, 0x6d, 0xf5, 0x1f, 0x69, 0x48, 0x6d, 0x3f, 0x47, 0xdf, 0x82, 0x2c, 0x7f, 0x2e, - 0x1a, 0xf3, 0x9a, 0x56, 0x1f, 0xf7, 0xf0, 0x84, 0xaf, 0x7e, 0xf2, 0xa7, 0xbf, 0x7e, 0x9e, 0xba, - 0x84, 0x2f, 0xac, 0x0c, 0xdf, 0x30, 0xba, 0xfd, 0x23, 0x63, 0xe5, 0x78, 0xb8, 0xc2, 0x72, 0xc2, - 0x43, 0xed, 0x3e, 0x7a, 0x0e, 0xe9, 0xa7, 0x03, 0x0f, 0x25, 0x3e, 0xb5, 0xd5, 0x93, 0x1f, 0xa4, - 0x70, 0x9d, 0x51, 0x9e, 0xc3, 0x33, 0x2a, 0xe5, 0xfe, 0xc0, 0xa3, 0x74, 0x87, 0x50, 0x54, 0xde, - 0x94, 0xd0, 0x99, 0x8f, 0x70, 0xf5, 0xb3, 0xdf, 0xab, 0x30, 0x66, 0xfc, 0xae, 0xe2, 0xcb, 0x2a, - 0x3f, 0xfe, 0xf4, 0xa5, 0xea, 0x73, 0x70, 0x62, 0x45, 0xf5, 0x09, 0x9e, 0x45, 0xa2, 0xfa, 0x28, - 0x4f, 0x11, 0xf1, 0xfa, 0x78, 0x27, 0x16, 0xa5, 0x6b, 0x8b, 0x77, 0xb0, 0x96, 0x87, 0xae, 0xc7, - 0xbc, 0xa3, 0xa8, 0x2f, 0x06, 0xf5, 0xc5, 0x64, 0x04, 0xc1, 0xe9, 0x06, 0xe3, 0x74, 0x05, 0x5f, - 0x52, 0x39, 0xb5, 0x7c, 0xbc, 0x87, 0xda, 0xfd, 0xd5, 0x23, 0xc8, 0xb2, 0x3e, 0x27, 0x6a, 0xca, - 0x8f, 0x7a, 0x4c, 0x87, 0x36, 0xe1, 0x04, 0x84, 0x3a, 0xa4, 0x78, 0x9e, 0x71, 0x9b, 0xc5, 0x15, - 0x9f, 0x1b, 0x6b, 0x75, 0x3e, 0xd4, 0xee, 0x2f, 0x69, 0xaf, 0x69, 0xab, 0xdf, 0xcb, 0x40, 0x96, - 0xb5, 0x8e, 0x50, 0x1f, 0x20, 0x68, 0x0a, 0x46, 0xf5, 0x1c, 0x69, 0x33, 0x46, 0xf5, 0x1c, 0xed, - 0x27, 0xe2, 
0xeb, 0x8c, 0xf3, 0x3c, 0x9e, 0xf3, 0x39, 0xb3, 0x57, 0xfb, 0x15, 0xd6, 0x24, 0xa2, - 0x66, 0x7d, 0x09, 0x45, 0xa5, 0xb9, 0x87, 0xe2, 0x28, 0x86, 0xba, 0x83, 0xd1, 0x63, 0x12, 0xd3, - 0x19, 0xc4, 0x37, 0x19, 0xd3, 0x6b, 0xb8, 0xa6, 0x1a, 0x97, 0xf3, 0x75, 0x18, 0x26, 0x65, 0xfc, - 0xa9, 0x06, 0x95, 0x70, 0x83, 0x0f, 0xdd, 0x8c, 0x21, 0x1d, 0xed, 0x13, 0xd6, 0x6f, 0x8d, 0x47, - 0x4a, 0x14, 0x81, 0xf3, 0x3f, 0x26, 0xa4, 0x6f, 0x50, 0x4c, 0x61, 0x7b, 0xf4, 0x7d, 0x0d, 0x66, - 0x22, 0x6d, 0x3b, 0x14, 0xc7, 0x62, 0xa4, 0x29, 0x58, 0xbf, 0x7d, 0x06, 0x96, 0x90, 0xe4, 0x2e, - 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, 0x0c, 0xcf, 0xec, 0x11, 0xcf, 0x16, 0xd2, 0xac, 0xfe, 0x33, - 0x0d, 0xf9, 0x75, 0xfe, 0x13, 0x2b, 0xe4, 0x41, 0xc1, 0xef, 0x84, 0xa1, 0x85, 0xb8, 0xae, 0x44, - 0x50, 0xb2, 0xd7, 0xaf, 0x27, 0xce, 0x0b, 0x11, 0xee, 0x30, 0x11, 0x16, 0xf1, 0x15, 0x5f, 0x04, - 0xf1, 0x53, 0xae, 0x15, 0x7e, 0xf9, 0x5e, 0x31, 0xda, 0x6d, 0xba, 0x25, 0xdf, 0xd5, 0xa0, 0xa4, - 0x36, 0xac, 0xd0, 0x8d, 0xd8, 0x7e, 0x88, 0xda, 0xf3, 0xaa, 0xe3, 0x71, 0x28, 0x82, 0xff, 0x3d, - 0xc6, 0xff, 0x26, 0x5e, 0x48, 0xe2, 0xef, 0x30, 0xfc, 0xb0, 0x08, 0xbc, 0xe5, 0x14, 0x2f, 0x42, - 0xa8, 0xa3, 0x15, 0x2f, 0x42, 0xb8, 0x63, 0x75, 0xb6, 0x08, 0x03, 0x86, 0x4f, 0x45, 0x38, 0x01, - 0x08, 0x3a, 0x4c, 0x28, 0xd6, 0xb8, 0xca, 0x25, 0x26, 0xea, 0x83, 0xa3, 0xcd, 0xa9, 0x98, 0x13, - 0x10, 0xe1, 0xdd, 0x35, 0x5d, 0xea, 0x8b, 0xab, 0xbf, 0xcd, 0x40, 0xf1, 0x89, 0x61, 0x5a, 0x1e, - 0xb1, 0x0c, 0xab, 0x45, 0x50, 0x07, 0xb2, 0x2c, 0x4b, 0x45, 0x03, 0x8f, 0xda, 0xf6, 0x89, 0x06, - 0x9e, 0x50, 0x4f, 0x04, 0xdf, 0x66, 0xac, 0xaf, 0xe3, 0xba, 0xcf, 0xba, 0x17, 0xd0, 0x5f, 0x61, - 0xfd, 0x0c, 0xaa, 0xf2, 0x31, 0xe4, 0x78, 0xff, 0x02, 0x45, 0xa8, 0x85, 0xfa, 0x1c, 0xf5, 0xab, - 0xf1, 0x93, 0x89, 0xa7, 0x4c, 0xe5, 0xe5, 0x32, 0x64, 0xca, 0xec, 0xdb, 0x00, 0x41, 0xc3, 0x2c, - 0x6a, 0xdf, 0x91, 0xfe, 0x5a, 0x7d, 0x31, 0x19, 0x41, 0x30, 0xbe, 0xcf, 0x18, 0xdf, 0xc2, 0xd7, - 0x63, 0x19, 0xb7, 0xfd, 0x05, 0x94, 0x79, 0x0b, 0x32, 0x9b, 0x86, 0x7b, 0x84, 0x22, 0x49, 0x48, - 0x79, 0xdb, 0xad, 0xd7, 0xe3, 0xa6, 0x04, 0xab, 0x5b, 0x8c, 0xd5, 0x02, 0x9e, 0x8f, 0x65, 0x75, - 0x64, 0xb8, 0x34, 0xa6, 0xa3, 0x01, 0x4c, 0xcb, 0xf7, 0x5a, 0x74, 0x2d, 0x62, 0xb3, 0xf0, 0xdb, - 0x6e, 0x7d, 0x21, 0x69, 0x5a, 0x30, 0x5c, 0x62, 0x0c, 0x31, 0xbe, 0x16, 0x6f, 0x54, 0x81, 0xfe, - 0x50, 0xbb, 0xff, 0x9a, 0xb6, 0xfa, 0xc3, 0x2a, 0x64, 0x68, 0xbd, 0x44, 0xb3, 0x48, 0x70, 0xcd, - 0x8c, 0x5a, 0x78, 0xa4, 0xb9, 0x13, 0xb5, 0xf0, 0xe8, 0x0d, 0x35, 0x26, 0x8b, 0xb0, 0x1f, 0x9a, - 0x12, 0x86, 0x45, 0x35, 0xf6, 0xa0, 0xa8, 0x5c, 0x46, 0x51, 0x0c, 0xc5, 0x70, 0xeb, 0x28, 0x9a, - 0x45, 0x62, 0x6e, 0xb2, 0x78, 0x91, 0x31, 0xad, 0xe3, 0x8b, 0x61, 0xa6, 0x6d, 0x8e, 0x46, 0xb9, - 0x7e, 0x0c, 0x25, 0xf5, 0xd6, 0x8a, 0x62, 0x88, 0x46, 0x7a, 0x53, 0xd1, 0x58, 0x11, 0x77, 0xe9, - 0x8d, 0x71, 0x1a, 0xff, 0x67, 0xb5, 0x12, 0x97, 0x72, 0xff, 0x10, 0xf2, 0xe2, 0x2e, 0x1b, 0xa7, - 0x6f, 0xb8, 0x9b, 0x15, 0xa7, 0x6f, 0xe4, 0x22, 0x1c, 0x53, 0x92, 0x30, 0xb6, 0xb4, 0x66, 0x97, - 0x01, 0x5a, 0xb0, 0x7c, 0x4c, 0xbc, 0x24, 0x96, 0x41, 0x7f, 0x26, 0x89, 0xa5, 0x72, 0x5f, 0x1a, - 0xcb, 0xb2, 0x43, 0x3c, 0x71, 0x96, 0xe5, 0x65, 0x04, 0x25, 0x50, 0x54, 0xa3, 0x21, 0x1e, 0x87, - 0x92, 0x58, 0x45, 0x06, 0x5c, 0x45, 0x28, 0x44, 0xdf, 0x01, 0x08, 0x2e, 0xde, 0xd1, 0xc2, 0x20, - 0xb6, 0x7b, 0x17, 0x2d, 0x0c, 0xe2, 0xef, 0xee, 0x31, 0x1e, 0x1c, 0x30, 0xe7, 0x95, 0x2c, 0x65, - 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x88, 0x67, 0x11, 0xdb, 0x18, 0xac, 0xbf, 0x72, - 0x3e, 0xe4, 0xc4, 0xe8, 0x19, 0xc8, 
0xd5, 0x62, 0x4b, 0xfa, 0x2f, 0xa9, 0x64, 0x9f, 0x69, 0x50, - 0x0e, 0x5d, 0xf5, 0xd1, 0x9d, 0x84, 0x7d, 0x8e, 0x34, 0x17, 0xeb, 0x77, 0xcf, 0xc4, 0x4b, 0xac, - 0x9d, 0x94, 0x53, 0x21, 0xeb, 0xc6, 0x1f, 0x68, 0x50, 0x09, 0xf7, 0x07, 0x50, 0x02, 0x83, 0x91, - 0x0e, 0x65, 0x7d, 0xe9, 0x6c, 0xc4, 0x73, 0xec, 0x56, 0x50, 0x4a, 0x7e, 0x08, 0x79, 0xd1, 0x56, - 0x88, 0x73, 0x8b, 0x70, 0x83, 0x33, 0xce, 0x2d, 0x22, 0x3d, 0x89, 0x24, 0xb7, 0xa0, 0x37, 0x74, - 0xc5, 0x13, 0x45, 0xf3, 0x21, 0x89, 0xe5, 0x78, 0x4f, 0x8c, 0x74, 0x2e, 0xc6, 0xb2, 0x0c, 0x3c, - 0x51, 0xb6, 0x1e, 0x50, 0x02, 0xc5, 0x33, 0x3c, 0x31, 0xda, 0xb9, 0x48, 0xf2, 0x44, 0xc6, 0x55, - 0xf1, 0xc4, 0xa0, 0x53, 0x10, 0xe7, 0x89, 0x23, 0xed, 0xdb, 0x38, 0x4f, 0x1c, 0x6d, 0x36, 0x24, - 0xed, 0x2d, 0x63, 0x1e, 0xf2, 0xc4, 0xd9, 0x98, 0xce, 0x02, 0x7a, 0x25, 0xc1, 0xa6, 0xb1, 0xad, - 0xe1, 0xfa, 0xab, 0xe7, 0xc4, 0x1e, 0xef, 0x01, 0x7c, 0x37, 0xa4, 0x07, 0xfc, 0x42, 0x83, 0xb9, - 0xb8, 0xd6, 0x04, 0x4a, 0x60, 0x96, 0xd0, 0x57, 0xae, 0x2f, 0x9f, 0x17, 0xfd, 0x1c, 0x76, 0xf3, - 0x7d, 0xe2, 0x51, 0xf5, 0x77, 0x5f, 0x2e, 0x68, 0x7f, 0xfc, 0x72, 0x41, 0xfb, 0xf3, 0x97, 0x0b, - 0xda, 0x4f, 0xff, 0xb2, 0x30, 0x75, 0x98, 0x63, 0xff, 0xdb, 0xe3, 0x8d, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0x63, 0x1c, 0x78, 0x24, 0x74, 0x32, 0x00, 0x00, + // 3549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5f, 0x6f, 0x1b, 0xc7, + 0xb5, 0xd7, 0x92, 0x22, 0x29, 0x1e, 0xfe, 0x11, 0x35, 0x92, 0x6d, 0x6a, 0x6d, 0xcb, 0xf2, 0xf8, + 0x9f, 0x6c, 0xc7, 0x52, 0xa2, 0xe4, 0xde, 0x07, 0xdf, 0x20, 0xb8, 0xb2, 0xc4, 0x58, 0x8a, 0x64, + 0xc9, 0x59, 0xc9, 0x4e, 0x2e, 0x10, 0x5c, 0x62, 0x45, 0x8e, 0xa5, 0x85, 0xc8, 0x5d, 0x66, 0x77, + 0x49, 0x4b, 0x69, 0x0a, 0x14, 0x69, 0x82, 0xa2, 0x05, 0xfa, 0xd2, 0x3c, 0xf4, 0xdf, 0x63, 0x51, + 0x14, 0xf9, 0x00, 0x45, 0x3f, 0x40, 0x81, 0xa2, 0xe8, 0x4b, 0x0b, 0xf4, 0x0b, 0x14, 0x69, 0xbf, + 0x46, 0xd1, 0x62, 0xfe, 0xed, 0xce, 0x2e, 0x77, 0x25, 0x25, 0x6c, 0xf2, 0x62, 0xed, 0x9c, 0x39, + 0x73, 0x7e, 0x67, 0xce, 0xcc, 0x39, 0x67, 0xe6, 0x0c, 0x0d, 0x45, 0xb7, 0xd7, 0x5a, 0xec, 0xb9, + 0x8e, 0xef, 0xa0, 0x32, 0xf1, 0x5b, 0x6d, 0x8f, 0xb8, 0x03, 0xe2, 0xf6, 0xf6, 0xf5, 0x99, 0x03, + 0xe7, 0xc0, 0x61, 0x1d, 0x4b, 0xf4, 0x8b, 0xf3, 0xe8, 0xb3, 0x94, 0x67, 0xa9, 0x3b, 0x68, 0xb5, + 0xd8, 0x3f, 0xbd, 0xfd, 0xa5, 0xa3, 0x81, 0xe8, 0xba, 0xcc, 0xba, 0xcc, 0xbe, 0x7f, 0xc8, 0xfe, + 0xe9, 0xed, 0xb3, 0x3f, 0xa2, 0xf3, 0xca, 0x81, 0xe3, 0x1c, 0x74, 0xc8, 0x92, 0xd9, 0xb3, 0x96, + 0x4c, 0xdb, 0x76, 0x7c, 0xd3, 0xb7, 0x1c, 0xdb, 0xe3, 0xbd, 0xf8, 0x33, 0x0d, 0xaa, 0x06, 0xf1, + 0x7a, 0x8e, 0xed, 0x91, 0x75, 0x62, 0xb6, 0x89, 0x8b, 0xae, 0x02, 0xb4, 0x3a, 0x7d, 0xcf, 0x27, + 0x6e, 0xd3, 0x6a, 0xd7, 0xb5, 0x79, 0x6d, 0x61, 0xdc, 0x28, 0x0a, 0xca, 0x46, 0x1b, 0x5d, 0x86, + 0x62, 0x97, 0x74, 0xf7, 0x79, 0x6f, 0x86, 0xf5, 0x4e, 0x70, 0xc2, 0x46, 0x1b, 0xe9, 0x30, 0xe1, + 0x92, 0x81, 0xe5, 0x59, 0x8e, 0x5d, 0xcf, 0xce, 0x6b, 0x0b, 0x59, 0x23, 0x68, 0xd3, 0x81, 0xae, + 0xf9, 0xc2, 0x6f, 0xfa, 0xc4, 0xed, 0xd6, 0xc7, 0xf9, 0x40, 0x4a, 0xd8, 0x23, 0x6e, 0x17, 0x7f, + 0x9a, 0x83, 0xb2, 0x61, 0xda, 0x07, 0xc4, 0x20, 0x1f, 0xf6, 0x89, 0xe7, 0xa3, 0x1a, 0x64, 0x8f, + 0xc8, 0x09, 0x83, 0x2f, 0x1b, 0xf4, 0x93, 0x8f, 0xb7, 0x0f, 0x48, 0x93, 0xd8, 0x1c, 0xb8, 0x4c, + 0xc7, 0xdb, 0x07, 0xa4, 0x61, 0xb7, 0xd1, 0x0c, 0xe4, 0x3a, 0x56, 0xd7, 0xf2, 0x05, 0x2a, 0x6f, + 0x44, 0xd4, 0x19, 0x8f, 0xa9, 0xb3, 0x0a, 0xe0, 0x39, 0xae, 0xdf, 0x74, 0xdc, 0x36, 0x71, 0xeb, + 0xb9, 0x79, 0x6d, 0xa1, 0xba, 0x7c, 0x73, 0x51, 
0x5d, 0x88, 0x45, 0x55, 0xa1, 0xc5, 0x5d, 0xc7, + 0xf5, 0x77, 0x28, 0xaf, 0x51, 0xf4, 0xe4, 0x27, 0x7a, 0x1b, 0x4a, 0x4c, 0x88, 0x6f, 0xba, 0x07, + 0xc4, 0xaf, 0xe7, 0x99, 0x94, 0x5b, 0x67, 0x48, 0xd9, 0x63, 0xcc, 0x06, 0x83, 0xe7, 0xdf, 0x08, + 0x43, 0xd9, 0x23, 0xae, 0x65, 0x76, 0xac, 0x8f, 0xcc, 0xfd, 0x0e, 0xa9, 0x17, 0xe6, 0xb5, 0x85, + 0x09, 0x23, 0x42, 0xa3, 0xf3, 0x3f, 0x22, 0x27, 0x5e, 0xd3, 0xb1, 0x3b, 0x27, 0xf5, 0x09, 0xc6, + 0x30, 0x41, 0x09, 0x3b, 0x76, 0xe7, 0x84, 0x2d, 0x9a, 0xd3, 0xb7, 0x7d, 0xde, 0x5b, 0x64, 0xbd, + 0x45, 0x46, 0x61, 0xdd, 0x0b, 0x50, 0xeb, 0x5a, 0x76, 0xb3, 0xeb, 0xb4, 0x9b, 0x81, 0x41, 0x80, + 0x19, 0xa4, 0xda, 0xb5, 0xec, 0x27, 0x4e, 0xdb, 0x90, 0x66, 0xa1, 0x9c, 0xe6, 0x71, 0x94, 0xb3, + 0x24, 0x38, 0xcd, 0x63, 0x95, 0x73, 0x11, 0xa6, 0xa9, 0xcc, 0x96, 0x4b, 0x4c, 0x9f, 0x84, 0xcc, + 0x65, 0xc6, 0x3c, 0xd5, 0xb5, 0xec, 0x55, 0xd6, 0x13, 0xe1, 0x37, 0x8f, 0x87, 0xf8, 0x2b, 0x82, + 0xdf, 0x3c, 0x8e, 0xf2, 0xe3, 0x45, 0x28, 0x06, 0x36, 0x47, 0x13, 0x30, 0xbe, 0xbd, 0xb3, 0xdd, + 0xa8, 0x8d, 0x21, 0x80, 0xfc, 0xca, 0xee, 0x6a, 0x63, 0x7b, 0xad, 0xa6, 0xa1, 0x12, 0x14, 0xd6, + 0x1a, 0xbc, 0x91, 0xc1, 0x8f, 0x00, 0x42, 0xeb, 0xa2, 0x02, 0x64, 0x37, 0x1b, 0xff, 0x57, 0x1b, + 0xa3, 0x3c, 0xcf, 0x1b, 0xc6, 0xee, 0xc6, 0xce, 0x76, 0x4d, 0xa3, 0x83, 0x57, 0x8d, 0xc6, 0xca, + 0x5e, 0xa3, 0x96, 0xa1, 0x1c, 0x4f, 0x76, 0xd6, 0x6a, 0x59, 0x54, 0x84, 0xdc, 0xf3, 0x95, 0xad, + 0x67, 0x8d, 0xda, 0x38, 0xfe, 0x5c, 0x83, 0x8a, 0x58, 0x2f, 0xee, 0x13, 0xe8, 0x0d, 0xc8, 0x1f, + 0x32, 0xbf, 0x60, 0x5b, 0xb1, 0xb4, 0x7c, 0x25, 0xb6, 0xb8, 0x11, 0xdf, 0x31, 0x04, 0x2f, 0xc2, + 0x90, 0x3d, 0x1a, 0x78, 0xf5, 0xcc, 0x7c, 0x76, 0xa1, 0xb4, 0x5c, 0x5b, 0xe4, 0x0e, 0xbb, 0xb8, + 0x49, 0x4e, 0x9e, 0x9b, 0x9d, 0x3e, 0x31, 0x68, 0x27, 0x42, 0x30, 0xde, 0x75, 0x5c, 0xc2, 0x76, + 0xec, 0x84, 0xc1, 0xbe, 0xe9, 0x36, 0x66, 0x8b, 0x26, 0x76, 0x2b, 0x6f, 0xe0, 0x2f, 0x34, 0x80, + 0xa7, 0x7d, 0x3f, 0xdd, 0x35, 0x66, 0x20, 0x37, 0xa0, 0x82, 0x85, 0x5b, 0xf0, 0x06, 0xf3, 0x09, + 0x62, 0x7a, 0x24, 0xf0, 0x09, 0xda, 0x40, 0x97, 0xa0, 0xd0, 0x73, 0xc9, 0xa0, 0x79, 0x34, 0x60, + 0x20, 0x13, 0x46, 0x9e, 0x36, 0x37, 0x07, 0xe8, 0x3a, 0x94, 0xad, 0x03, 0xdb, 0x71, 0x49, 0x93, + 0xcb, 0xca, 0xb1, 0xde, 0x12, 0xa7, 0x31, 0xbd, 0x15, 0x16, 0x2e, 0x38, 0xaf, 0xb2, 0x6c, 0x51, + 0x12, 0xb6, 0xa1, 0xc4, 0x54, 0x1d, 0xc9, 0x7c, 0x77, 0x43, 0x1d, 0x33, 0x6c, 0xd8, 0xb0, 0x09, + 0x85, 0xd6, 0xf8, 0x03, 0x40, 0x6b, 0xa4, 0x43, 0x7c, 0x32, 0x4a, 0xf4, 0x50, 0x6c, 0x92, 0x55, + 0x6d, 0x82, 0x7f, 0xa2, 0xc1, 0x74, 0x44, 0xfc, 0x48, 0xd3, 0xaa, 0x43, 0xa1, 0xcd, 0x84, 0x71, + 0x0d, 0xb2, 0x86, 0x6c, 0xa2, 0xfb, 0x30, 0x21, 0x14, 0xf0, 0xea, 0xd9, 0x94, 0x4d, 0x53, 0xe0, + 0x3a, 0x79, 0xf8, 0x8b, 0x0c, 0x14, 0xc5, 0x44, 0x77, 0x7a, 0x68, 0x05, 0x2a, 0x2e, 0x6f, 0x34, + 0xd9, 0x7c, 0x84, 0x46, 0x7a, 0x7a, 0x10, 0x5a, 0x1f, 0x33, 0xca, 0x62, 0x08, 0x23, 0xa3, 0xff, + 0x81, 0x92, 0x14, 0xd1, 0xeb, 0xfb, 0xc2, 0xe4, 0xf5, 0xa8, 0x80, 0x70, 0xff, 0xad, 0x8f, 0x19, + 0x20, 0xd8, 0x9f, 0xf6, 0x7d, 0xb4, 0x07, 0x33, 0x72, 0x30, 0x9f, 0x8d, 0x50, 0x23, 0xcb, 0xa4, + 0xcc, 0x47, 0xa5, 0x0c, 0x2f, 0xd5, 0xfa, 0x98, 0x81, 0xc4, 0x78, 0xa5, 0x53, 0x55, 0xc9, 0x3f, + 0xe6, 0xc1, 0x7b, 0x48, 0xa5, 0xbd, 0x63, 0x7b, 0x58, 0xa5, 0xbd, 0x63, 0xfb, 0x51, 0x11, 0x0a, + 0xa2, 0x85, 0x7f, 0x97, 0x01, 0x90, 0xab, 0xb1, 0xd3, 0x43, 0x6b, 0x50, 0x75, 0x45, 0x2b, 0x62, + 0xad, 0xcb, 0x89, 0xd6, 0x12, 0x8b, 0x38, 0x66, 0x54, 0xe4, 0x20, 0xae, 0xdc, 0x5b, 0x50, 0x0e, + 0xa4, 0x84, 0x06, 0x9b, 0x4d, 0x30, 0x58, 0x20, 0xa1, 0x24, 0x07, 0x50, 
0x93, 0xbd, 0x07, 0x17, + 0x82, 0xf1, 0x09, 0x36, 0xbb, 0x7e, 0x8a, 0xcd, 0x02, 0x81, 0xd3, 0x52, 0x82, 0x6a, 0x35, 0x55, + 0xb1, 0xd0, 0x6c, 0xb3, 0x09, 0x66, 0x1b, 0x56, 0x8c, 0x1a, 0x0e, 0x68, 0xbe, 0xe4, 0x4d, 0xfc, + 0x87, 0x2c, 0x14, 0x56, 0x9d, 0x6e, 0xcf, 0x74, 0xe9, 0x6a, 0xe4, 0x5d, 0xe2, 0xf5, 0x3b, 0x3e, + 0x33, 0x57, 0x75, 0xf9, 0x46, 0x54, 0xa2, 0x60, 0x93, 0x7f, 0x0d, 0xc6, 0x6a, 0x88, 0x21, 0x74, + 0xb0, 0x48, 0x8f, 0x99, 0x73, 0x0c, 0x16, 0xc9, 0x51, 0x0c, 0x91, 0x8e, 0x9c, 0x0d, 0x1d, 0x59, + 0x87, 0xc2, 0x80, 0xb8, 0x61, 0x4a, 0x5f, 0x1f, 0x33, 0x24, 0x01, 0xdd, 0x85, 0xc9, 0x78, 0x7a, + 0xc9, 0x09, 0x9e, 0x6a, 0x2b, 0x9a, 0x8d, 0x6e, 0x40, 0x39, 0x92, 0xe3, 0xf2, 0x82, 0xaf, 0xd4, + 0x55, 0x52, 0xdc, 0x45, 0x19, 0x57, 0x69, 0x3e, 0x2e, 0xaf, 0x8f, 0xc9, 0xc8, 0x1a, 0x09, 0x26, + 0x13, 0xd1, 0x60, 0x82, 0xff, 0x17, 0x2a, 0x11, 0x43, 0xd0, 0xfc, 0xd2, 0x78, 0xf7, 0xd9, 0xca, + 0x16, 0x4f, 0x46, 0x8f, 0x59, 0xfe, 0x31, 0x6a, 0x1a, 0xcd, 0x69, 0x5b, 0x8d, 0xdd, 0xdd, 0x5a, + 0x06, 0x55, 0xa0, 0xb8, 0xbd, 0xb3, 0xd7, 0xe4, 0x5c, 0x59, 0xfc, 0x66, 0x20, 0x41, 0x24, 0x33, + 0x25, 0x87, 0x8d, 0x29, 0x39, 0x4c, 0x93, 0x39, 0x2c, 0x13, 0xe6, 0xb0, 0xec, 0xa3, 0x2a, 0x94, + 0xb9, 0xf1, 0x9a, 0x7d, 0x9b, 0xe6, 0xd1, 0x5f, 0x69, 0x00, 0xa1, 0xab, 0xa0, 0x25, 0x28, 0xb4, + 0xb8, 0xf0, 0xba, 0xc6, 0x22, 0xcd, 0x85, 0xc4, 0xf5, 0x30, 0x24, 0x17, 0x7a, 0x0d, 0x0a, 0x5e, + 0xbf, 0xd5, 0x22, 0x9e, 0xcc, 0x67, 0x97, 0xe2, 0xc1, 0x4e, 0x84, 0x22, 0x43, 0xf2, 0xd1, 0x21, + 0x2f, 0x4c, 0xab, 0xd3, 0x67, 0xd9, 0xed, 0xf4, 0x21, 0x82, 0x0f, 0xff, 0x5c, 0x83, 0x92, 0xb2, + 0x33, 0xbf, 0x66, 0x84, 0xbd, 0x02, 0x45, 0xa6, 0x03, 0x69, 0x8b, 0x18, 0x3b, 0x61, 0x84, 0x04, + 0xf4, 0xdf, 0x50, 0x94, 0xdb, 0x5b, 0x86, 0xd9, 0x7a, 0xb2, 0xd8, 0x9d, 0x9e, 0x11, 0xb2, 0xe2, + 0x4d, 0x98, 0x62, 0x56, 0x69, 0xd1, 0x93, 0xb3, 0xb4, 0xa3, 0x7a, 0xb6, 0xd4, 0x62, 0x67, 0x4b, + 0x1d, 0x26, 0x7a, 0x87, 0x27, 0x9e, 0xd5, 0x32, 0x3b, 0x42, 0x8b, 0xa0, 0x8d, 0xdf, 0x01, 0xa4, + 0x0a, 0x1b, 0x65, 0xba, 0xb8, 0x02, 0xa5, 0x75, 0xd3, 0x3b, 0x14, 0x2a, 0xe1, 0xf7, 0xa1, 0xcc, + 0x9b, 0x23, 0xd9, 0x10, 0xc1, 0xf8, 0xa1, 0xe9, 0x1d, 0x32, 0xc5, 0x2b, 0x06, 0xfb, 0xc6, 0x53, + 0x30, 0xb9, 0x6b, 0x9b, 0x3d, 0xef, 0xd0, 0x91, 0x59, 0x80, 0xde, 0x1c, 0x6a, 0x21, 0x6d, 0x24, + 0xc4, 0x3b, 0x30, 0xe9, 0x92, 0xae, 0x69, 0xd9, 0x96, 0x7d, 0xd0, 0xdc, 0x3f, 0xf1, 0x89, 0x27, + 0x2e, 0x16, 0xd5, 0x80, 0xfc, 0x88, 0x52, 0xa9, 0x6a, 0xfb, 0x1d, 0x67, 0x5f, 0x84, 0x03, 0xf6, + 0x8d, 0x7f, 0xab, 0x41, 0xf9, 0x3d, 0xd3, 0x6f, 0x49, 0x2b, 0xa0, 0x0d, 0xa8, 0x06, 0x41, 0x80, + 0x51, 0x84, 0x2e, 0xb1, 0x54, 0xc4, 0xc6, 0xc8, 0x23, 0xa7, 0xcc, 0x22, 0x95, 0x96, 0x4a, 0x60, + 0xa2, 0x4c, 0xbb, 0x45, 0x3a, 0x81, 0xa8, 0x4c, 0xba, 0x28, 0xc6, 0xa8, 0x8a, 0x52, 0x09, 0x8f, + 0x26, 0xc3, 0x34, 0xcd, 0xdd, 0xf2, 0x17, 0x19, 0x40, 0xc3, 0x3a, 0x7c, 0xd5, 0x93, 0xcb, 0x2d, + 0xa8, 0x7a, 0xbe, 0xe9, 0xfa, 0xcd, 0xd8, 0xb5, 0xab, 0xc2, 0xa8, 0x41, 0x20, 0xbb, 0x03, 0x93, + 0x3d, 0xd7, 0x39, 0x70, 0x89, 0xe7, 0x35, 0x6d, 0xc7, 0xb7, 0x5e, 0x9c, 0x88, 0xc3, 0x5f, 0x55, + 0x92, 0xb7, 0x19, 0x15, 0x35, 0xa0, 0xf0, 0xc2, 0xea, 0xf8, 0xc4, 0xf5, 0xea, 0xb9, 0xf9, 0xec, + 0x42, 0x75, 0xf9, 0xfe, 0x59, 0x56, 0x5b, 0x7c, 0x9b, 0xf1, 0xef, 0x9d, 0xf4, 0x88, 0x21, 0xc7, + 0xaa, 0x07, 0xaa, 0x7c, 0xe4, 0x40, 0x75, 0x0b, 0x20, 0xe4, 0xa7, 0x51, 0x6b, 0x7b, 0xe7, 0xe9, + 0xb3, 0xbd, 0xda, 0x18, 0x2a, 0xc3, 0xc4, 0xf6, 0xce, 0x5a, 0x63, 0xab, 0x41, 0xe3, 0x1a, 0x5e, + 0x92, 0xb6, 0x51, 0x6d, 0x88, 0x66, 0x61, 0xe2, 0x25, 0xa5, 0xca, 0x7b, 0x69, 0xd6, 0x28, 0xb0, + 
0xf6, 0x46, 0x1b, 0xff, 0x38, 0x03, 0x15, 0xb1, 0x0b, 0x46, 0xda, 0x8a, 0x2a, 0x44, 0x26, 0x02, + 0x41, 0x4f, 0x6f, 0x7c, 0x77, 0xb4, 0xc5, 0x21, 0x51, 0x36, 0xa9, 0xbb, 0xf3, 0xc5, 0x26, 0x6d, + 0x61, 0xd6, 0xa0, 0x8d, 0xee, 0x42, 0xad, 0xc5, 0xdd, 0x3d, 0x96, 0x93, 0x8c, 0x49, 0x41, 0x57, + 0x52, 0x52, 0x25, 0xd8, 0x6d, 0xa6, 0x27, 0x72, 0x52, 0xd1, 0x28, 0xcb, 0x8d, 0x44, 0x69, 0xe8, + 0x16, 0xe4, 0xc9, 0x80, 0xd8, 0xbe, 0x57, 0x2f, 0xb1, 0x00, 0x56, 0x91, 0xe7, 0xc4, 0x06, 0xa5, + 0x1a, 0xa2, 0x13, 0xff, 0x17, 0x4c, 0xb1, 0xf3, 0xf8, 0x63, 0xd7, 0xb4, 0xd5, 0x8b, 0xc3, 0xde, + 0xde, 0x96, 0x30, 0x1d, 0xfd, 0x44, 0x55, 0xc8, 0x6c, 0xac, 0x89, 0x89, 0x66, 0x36, 0xd6, 0xf0, + 0x27, 0x1a, 0x20, 0x75, 0xdc, 0x48, 0xb6, 0x8c, 0x09, 0x97, 0xf0, 0xd9, 0x10, 0x7e, 0x06, 0x72, + 0xc4, 0x75, 0x1d, 0x97, 0x59, 0xad, 0x68, 0xf0, 0x06, 0xbe, 0x29, 0x74, 0x30, 0xc8, 0xc0, 0x39, + 0x0a, 0x1c, 0x83, 0x4b, 0xd3, 0x02, 0x55, 0x37, 0x61, 0x3a, 0xc2, 0x35, 0x52, 0x20, 0xbd, 0x03, + 0x17, 0x98, 0xb0, 0x4d, 0x42, 0x7a, 0x2b, 0x1d, 0x6b, 0x90, 0x8a, 0xda, 0x83, 0x8b, 0x71, 0xc6, + 0x6f, 0xd6, 0x46, 0xf8, 0x4d, 0x81, 0xb8, 0x67, 0x75, 0xc9, 0x9e, 0xb3, 0x95, 0xae, 0x1b, 0x8d, + 0x8e, 0x47, 0xe4, 0xc4, 0x13, 0x19, 0x87, 0x7d, 0xe3, 0x5f, 0x6b, 0x70, 0x69, 0x68, 0xf8, 0x37, + 0xbc, 0xaa, 0x73, 0x00, 0x07, 0x74, 0xfb, 0x90, 0x36, 0xed, 0xe0, 0x37, 0x59, 0x85, 0x12, 0xe8, + 0x49, 0x03, 0x4c, 0x59, 0xe8, 0x79, 0x08, 0xf9, 0x27, 0xac, 0x88, 0xa4, 0xcc, 0x6a, 0x5c, 0xce, + 0xca, 0x36, 0xbb, 0xfc, 0x6a, 0x5b, 0x34, 0xd8, 0x37, 0xcb, 0xaf, 0x84, 0xb8, 0xcf, 0x8c, 0x2d, + 0x9e, 0xc7, 0x8b, 0x46, 0xd0, 0xa6, 0xe8, 0xad, 0x8e, 0x45, 0x6c, 0x9f, 0xf5, 0x8e, 0xb3, 0x5e, + 0x85, 0x82, 0x17, 0xa1, 0xc6, 0x91, 0x56, 0xda, 0x6d, 0x25, 0x97, 0x07, 0xf2, 0xb4, 0xa8, 0x3c, + 0xfc, 0x1b, 0x0d, 0xa6, 0x94, 0x01, 0x23, 0xd9, 0xee, 0x15, 0xc8, 0xf3, 0x52, 0x99, 0xc8, 0x23, + 0x33, 0xd1, 0x51, 0x1c, 0xc6, 0x10, 0x3c, 0x68, 0x11, 0x0a, 0xfc, 0x4b, 0x1e, 0x56, 0x92, 0xd9, + 0x25, 0x13, 0xbe, 0x05, 0xd3, 0x82, 0x44, 0xba, 0x4e, 0xd2, 0x36, 0x61, 0x06, 0xc5, 0x1f, 0xc3, + 0x4c, 0x94, 0x6d, 0xa4, 0x29, 0x29, 0x4a, 0x66, 0xce, 0xa3, 0xe4, 0x8a, 0x54, 0xf2, 0x59, 0xaf, + 0xad, 0xa4, 0xbd, 0xf8, 0xaa, 0xab, 0x2b, 0x92, 0x89, 0xad, 0x48, 0x30, 0x01, 0x29, 0xe2, 0x5b, + 0x9d, 0xc0, 0xb4, 0xdc, 0x0e, 0x5b, 0x96, 0x17, 0x1c, 0x86, 0x3e, 0x02, 0xa4, 0x12, 0xbf, 0x6d, + 0x85, 0xd6, 0xc8, 0x0b, 0xd7, 0x3c, 0xe8, 0x92, 0x20, 0xd4, 0xd3, 0x53, 0xa6, 0x4a, 0x1c, 0x29, + 0x38, 0x2e, 0xc1, 0xd4, 0x13, 0x67, 0x40, 0xb6, 0x38, 0x35, 0x74, 0x19, 0x7e, 0xcb, 0x08, 0x96, + 0x2d, 0x68, 0x53, 0x70, 0x75, 0xc0, 0x48, 0xe0, 0x7f, 0xd6, 0xa0, 0xbc, 0xd2, 0x31, 0xdd, 0xae, + 0x04, 0x7e, 0x0b, 0xf2, 0xfc, 0xec, 0x2c, 0xee, 0xa2, 0xb7, 0xa3, 0x62, 0x54, 0x5e, 0xde, 0x58, + 0xe1, 0x27, 0x6d, 0x31, 0x8a, 0x2a, 0x2e, 0xca, 0xd5, 0x6b, 0xb1, 0xf2, 0xf5, 0x1a, 0x7a, 0x00, + 0x39, 0x93, 0x0e, 0x61, 0xd1, 0xac, 0x1a, 0xbf, 0xb5, 0x30, 0x69, 0xec, 0x9c, 0xc3, 0xb9, 0xf0, + 0x1b, 0x50, 0x52, 0x10, 0xe8, 0x65, 0xec, 0x71, 0x43, 0x9c, 0x65, 0x56, 0x56, 0xf7, 0x36, 0x9e, + 0xf3, 0x3b, 0x5a, 0x15, 0x60, 0xad, 0x11, 0xb4, 0x33, 0xf8, 0x7d, 0x31, 0x4a, 0xc4, 0x3b, 0x55, + 0x1f, 0x2d, 0x4d, 0x9f, 0xcc, 0xb9, 0xf4, 0x39, 0x86, 0x8a, 0x98, 0xfe, 0x48, 0x1b, 0xf0, 0x35, + 0xc8, 0x33, 0x79, 0x72, 0xff, 0xcd, 0x26, 0xc0, 0xca, 0x50, 0xc5, 0x19, 0xf1, 0x24, 0x54, 0x76, + 0x7d, 0xd3, 0xef, 0x7b, 0x72, 0xff, 0xfd, 0x49, 0x83, 0xaa, 0xa4, 0x8c, 0x5a, 0x33, 0x93, 0xd7, + 0x7d, 0x9e, 0x01, 0x82, 0xcb, 0xfe, 0x45, 0xc8, 0xb7, 0xf7, 0x77, 0xad, 0x8f, 0x64, 0x7d, 0x53, + 0xb4, 0x28, 0xbd, 0xc3, 
0x71, 0xf8, 0x23, 0x83, 0x68, 0xd1, 0xbb, 0xa1, 0x6b, 0xbe, 0xf0, 0x37, + 0xec, 0x36, 0x39, 0x66, 0x47, 0xb0, 0x71, 0x23, 0x24, 0xb0, 0xeb, 0x9c, 0x78, 0x8c, 0x60, 0xe7, + 0x2e, 0xf5, 0x71, 0x62, 0x1a, 0xa6, 0x56, 0xfa, 0xfe, 0x61, 0xc3, 0x36, 0xf7, 0x3b, 0x32, 0x62, + 0xe1, 0x19, 0x40, 0x94, 0xb8, 0x66, 0x79, 0x2a, 0xb5, 0x01, 0xd3, 0x94, 0x4a, 0x6c, 0xdf, 0x6a, + 0x29, 0xe1, 0x4d, 0x26, 0x31, 0x2d, 0x96, 0xc4, 0x4c, 0xcf, 0x7b, 0xe9, 0xb8, 0x6d, 0x31, 0xb5, + 0xa0, 0x8d, 0xd7, 0xb8, 0xf0, 0x67, 0x5e, 0x24, 0x4d, 0x7d, 0x55, 0x29, 0x0b, 0xa1, 0x94, 0xc7, + 0xc4, 0x3f, 0x45, 0x0a, 0xbe, 0x0f, 0x17, 0x24, 0xa7, 0xa8, 0x27, 0x9d, 0xc2, 0xbc, 0x03, 0x57, + 0x25, 0xf3, 0xea, 0x21, 0xbd, 0x88, 0x3c, 0x15, 0x80, 0x5f, 0x57, 0xcf, 0x47, 0x50, 0x0f, 0xf4, + 0x64, 0xe7, 0x4e, 0xa7, 0xa3, 0x2a, 0xd0, 0xf7, 0xc4, 0x9e, 0x29, 0x1a, 0xec, 0x9b, 0xd2, 0x5c, + 0xa7, 0x13, 0x1c, 0x09, 0xe8, 0x37, 0x5e, 0x85, 0x59, 0x29, 0x43, 0x9c, 0x08, 0xa3, 0x42, 0x86, + 0x14, 0x4a, 0x12, 0x22, 0x0c, 0x46, 0x87, 0x9e, 0x6e, 0x76, 0x95, 0x33, 0x6a, 0x5a, 0x26, 0x53, + 0x53, 0x64, 0x5e, 0xe0, 0x3b, 0x82, 0x2a, 0xa6, 0x66, 0x0c, 0x41, 0xa6, 0x02, 0x54, 0xb2, 0x58, + 0x08, 0x4a, 0x1e, 0x5a, 0x88, 0x21, 0xd1, 0x1f, 0xc0, 0x5c, 0xa0, 0x04, 0xb5, 0xdb, 0x53, 0xe2, + 0x76, 0x2d, 0xcf, 0x53, 0x8a, 0x14, 0x49, 0x13, 0xbf, 0x0d, 0xe3, 0x3d, 0x22, 0x62, 0x4a, 0x69, + 0x19, 0x2d, 0xf2, 0x27, 0xc3, 0x45, 0x65, 0x30, 0xeb, 0xc7, 0x6d, 0xb8, 0x26, 0xa5, 0x73, 0x8b, + 0x26, 0x8a, 0x8f, 0x2b, 0x25, 0x2f, 0xb0, 0xdc, 0xac, 0xc3, 0x17, 0xd8, 0x2c, 0x5f, 0xfb, 0xa0, + 0x5a, 0xf6, 0x0e, 0x37, 0xa4, 0xf4, 0xad, 0x91, 0x72, 0xc5, 0x26, 0xb7, 0x69, 0xe0, 0x92, 0x23, + 0x09, 0xdb, 0x87, 0x99, 0xa8, 0x27, 0x8f, 0x14, 0xc6, 0x66, 0x20, 0xe7, 0x3b, 0x47, 0x44, 0x06, + 0x31, 0xde, 0x90, 0x0a, 0x07, 0x6e, 0x3e, 0x92, 0xc2, 0x66, 0x28, 0x8c, 0x6d, 0xc9, 0x51, 0xf5, + 0xa5, 0xab, 0x29, 0x0f, 0x5f, 0xbc, 0x81, 0xb7, 0xe1, 0x62, 0x3c, 0x4c, 0x8c, 0xa4, 0xf2, 0x73, + 0xbe, 0x81, 0x93, 0x22, 0xc9, 0x48, 0x72, 0xdf, 0x0d, 0x83, 0x81, 0x12, 0x50, 0x46, 0x12, 0x69, + 0x80, 0x9e, 0x14, 0x5f, 0xfe, 0x13, 0xfb, 0x35, 0x08, 0x37, 0x23, 0x09, 0xf3, 0x42, 0x61, 0xa3, + 0x2f, 0x7f, 0x18, 0x23, 0xb2, 0xa7, 0xc6, 0x08, 0xe1, 0x24, 0x61, 0x14, 0xfb, 0x06, 0x36, 0x9d, + 0xc0, 0x08, 0x03, 0xe8, 0xa8, 0x18, 0x34, 0x87, 0x04, 0x18, 0xac, 0x21, 0x37, 0xb6, 0x1a, 0x76, + 0x47, 0x5a, 0x8c, 0xf7, 0xc2, 0xd8, 0x39, 0x14, 0x99, 0x47, 0x12, 0xfc, 0x3e, 0xcc, 0xa7, 0x07, + 0xe5, 0x51, 0x24, 0xdf, 0xc3, 0x50, 0x0c, 0x0e, 0x94, 0xca, 0x73, 0x7b, 0x09, 0x0a, 0xdb, 0x3b, + 0xbb, 0x4f, 0x57, 0x56, 0x1b, 0x35, 0x6d, 0xf9, 0x9f, 0x59, 0xc8, 0x6c, 0x3e, 0x47, 0xff, 0x0f, + 0x39, 0xfe, 0xde, 0x74, 0xca, 0x23, 0xa3, 0x7e, 0xda, 0x93, 0x1a, 0xbe, 0xf2, 0xc9, 0x5f, 0xff, + 0xf1, 0x79, 0xe6, 0x22, 0x9e, 0x5a, 0x1a, 0xbc, 0x6e, 0x76, 0x7a, 0x87, 0xe6, 0xd2, 0xd1, 0x60, + 0x89, 0xe5, 0x84, 0x87, 0xda, 0x3d, 0xf4, 0x1c, 0xb2, 0x4f, 0xfb, 0x3e, 0x4a, 0x7d, 0x81, 0xd4, + 0xd3, 0x9f, 0xda, 0xb0, 0xce, 0x24, 0xcf, 0xe0, 0x49, 0x55, 0x72, 0xaf, 0xef, 0x53, 0xb9, 0x03, + 0x28, 0xa9, 0xaf, 0x65, 0x67, 0xbe, 0x4d, 0xea, 0x67, 0xbf, 0xc4, 0x61, 0xcc, 0xf0, 0xae, 0xe0, + 0x4b, 0x2a, 0x1e, 0x7f, 0xd4, 0x53, 0xe7, 0xb3, 0x77, 0x6c, 0xa3, 0xd4, 0xe7, 0x4b, 0x3d, 0xfd, + 0x85, 0x2e, 0x79, 0x3e, 0xfe, 0xb1, 0x4d, 0xe5, 0x3a, 0xe2, 0x85, 0xae, 0xe5, 0xa3, 0x6b, 0x09, + 0x8f, 0x38, 0xea, 0x73, 0x85, 0x3e, 0x9f, 0xce, 0x20, 0x90, 0xae, 0x33, 0xa4, 0xcb, 0xf8, 0xa2, + 0x8a, 0xd4, 0x0a, 0xf8, 0x1e, 0x6a, 0xf7, 0x96, 0x0f, 0x21, 0xc7, 0x8a, 0xac, 0xa8, 0x29, 0x3f, + 0xf4, 0x84, 0xf2, 0x70, 0xca, 0x0e, 0x88, 0x94, 
0x67, 0xf1, 0x2c, 0x43, 0x9b, 0xc6, 0xd5, 0x00, + 0x8d, 0xd5, 0x59, 0x1f, 0x6a, 0xf7, 0x16, 0xb4, 0x57, 0xb5, 0xe5, 0xef, 0x8f, 0x43, 0x8e, 0xd5, + 0xad, 0x50, 0x0f, 0x20, 0xac, 0x48, 0xc6, 0xe7, 0x39, 0x54, 0xe3, 0x8c, 0xcf, 0x73, 0xb8, 0x98, + 0x89, 0xaf, 0x31, 0xe4, 0x59, 0x3c, 0x13, 0x20, 0xb3, 0x1f, 0x33, 0x2c, 0xb1, 0x0a, 0x15, 0x35, + 0xeb, 0x4b, 0x28, 0x29, 0x95, 0x45, 0x94, 0x24, 0x31, 0x52, 0x9a, 0x8c, 0x6f, 0x93, 0x84, 0xb2, + 0x24, 0xbe, 0xc1, 0x40, 0xaf, 0xe2, 0xba, 0x6a, 0x5c, 0x8e, 0xeb, 0x32, 0x4e, 0x0a, 0xfc, 0xa9, + 0x06, 0xd5, 0x68, 0x75, 0x11, 0xdd, 0x48, 0x10, 0x1d, 0x2f, 0x52, 0xea, 0x37, 0x4f, 0x67, 0x4a, + 0x55, 0x81, 0xe3, 0x1f, 0x11, 0xd2, 0x33, 0x29, 0xa7, 0xb0, 0x3d, 0xfa, 0x81, 0x06, 0x93, 0xb1, + 0x9a, 0x21, 0x4a, 0x82, 0x18, 0xaa, 0x48, 0xea, 0xb7, 0xce, 0xe0, 0x12, 0x9a, 0xdc, 0x61, 0x9a, + 0x5c, 0xc7, 0x57, 0x86, 0x8d, 0xe1, 0x5b, 0x5d, 0xe2, 0x3b, 0x42, 0x9b, 0xe5, 0x7f, 0x65, 0xa1, + 0xb0, 0xca, 0x7f, 0x79, 0x86, 0x7c, 0x28, 0x06, 0x65, 0x38, 0x34, 0x97, 0x54, 0x12, 0x09, 0x8f, + 0xec, 0xfa, 0xb5, 0xd4, 0x7e, 0xa1, 0xc2, 0x6d, 0xa6, 0xc2, 0x3c, 0xbe, 0x1c, 0xa8, 0x20, 0x7e, + 0xe1, 0xb6, 0xc4, 0x2f, 0xdf, 0x4b, 0x66, 0xbb, 0x4d, 0x97, 0xe4, 0x7b, 0x1a, 0x94, 0xd5, 0x6a, + 0x19, 0xba, 0x9e, 0x58, 0x8c, 0x51, 0x0b, 0x6e, 0x3a, 0x3e, 0x8d, 0x45, 0xe0, 0xdf, 0x65, 0xf8, + 0x37, 0xf0, 0x5c, 0x1a, 0xbe, 0xcb, 0xf8, 0xa3, 0x2a, 0xf0, 0x7a, 0x57, 0xb2, 0x0a, 0x91, 0x72, + 0x5a, 0xb2, 0x0a, 0xd1, 0x72, 0xd9, 0xd9, 0x2a, 0xf4, 0x19, 0x3f, 0x55, 0xe1, 0x18, 0x20, 0x2c, + 0x6f, 0xa1, 0x44, 0xe3, 0x2a, 0x97, 0x98, 0xb8, 0x0f, 0x0e, 0x57, 0xc6, 0x12, 0x76, 0x40, 0x0c, + 0xbb, 0x63, 0x79, 0xd4, 0x17, 0x97, 0x7f, 0x9f, 0x83, 0xd2, 0x13, 0xd3, 0xb2, 0x7d, 0x62, 0x9b, + 0x76, 0x8b, 0xa0, 0x03, 0xc8, 0xb1, 0x2c, 0x15, 0x0f, 0x3c, 0x6a, 0xd9, 0x27, 0x1e, 0x78, 0x22, + 0x35, 0x11, 0x7c, 0x8b, 0x41, 0x5f, 0xc3, 0x7a, 0x00, 0xdd, 0x0d, 0xe5, 0x2f, 0xb1, 0x7a, 0x06, + 0x9d, 0xf2, 0x11, 0xe4, 0x79, 0xfd, 0x02, 0xc5, 0xa4, 0x45, 0xea, 0x1c, 0xfa, 0x95, 0xe4, 0xce, + 0xd4, 0x5d, 0xa6, 0x62, 0x79, 0x8c, 0x99, 0x82, 0x7d, 0x07, 0x20, 0xac, 0xd6, 0xc5, 0xed, 0x3b, + 0x54, 0xdc, 0xd3, 0xe7, 0xd3, 0x19, 0x04, 0xf0, 0x3d, 0x06, 0x7c, 0x13, 0x5f, 0x4b, 0x04, 0x6e, + 0x07, 0x03, 0x28, 0x78, 0x0b, 0xc6, 0xd7, 0x4d, 0xef, 0x10, 0xc5, 0x92, 0x90, 0xf2, 0xb0, 0xac, + 0xeb, 0x49, 0x5d, 0x02, 0xea, 0x26, 0x83, 0x9a, 0xc3, 0xb3, 0x89, 0x50, 0x87, 0xa6, 0x47, 0x63, + 0x3a, 0xea, 0xc3, 0x84, 0x7c, 0x2c, 0x46, 0x57, 0x63, 0x36, 0x8b, 0x3e, 0x2c, 0xeb, 0x73, 0x69, + 0xdd, 0x02, 0x70, 0x81, 0x01, 0x62, 0x7c, 0x35, 0xd9, 0xa8, 0x82, 0xfd, 0xa1, 0x76, 0xef, 0x55, + 0x8d, 0x46, 0x54, 0x08, 0x4b, 0x91, 0x43, 0x3b, 0x37, 0x5e, 0xd5, 0x1c, 0xda, 0xb9, 0x43, 0x55, + 0x4c, 0xfc, 0x3a, 0x43, 0x7f, 0x80, 0x17, 0x12, 0xd1, 0x7d, 0xd7, 0xb4, 0xbd, 0x17, 0xc4, 0x7d, + 0xc0, 0x6b, 0x4e, 0xde, 0xa1, 0xd5, 0xa3, 0xbb, 0xf8, 0x47, 0x35, 0x18, 0xa7, 0xc7, 0x36, 0x9a, + 0xcc, 0xc2, 0xdb, 0x6e, 0x5c, 0x9d, 0xa1, 0x1a, 0x53, 0x5c, 0x9d, 0xe1, 0x8b, 0x72, 0x42, 0x32, + 0x63, 0x3f, 0x03, 0x26, 0x8c, 0x8b, 0x1a, 0xde, 0x87, 0x92, 0x72, 0x27, 0x46, 0x09, 0x12, 0xa3, + 0x15, 0xac, 0x78, 0x32, 0x4b, 0xb8, 0x50, 0xe3, 0x79, 0x06, 0xaa, 0xe3, 0x0b, 0x51, 0xd0, 0x36, + 0x67, 0xa3, 0xa8, 0x1f, 0x43, 0x59, 0xbd, 0x3c, 0xa3, 0x04, 0xa1, 0xb1, 0x12, 0x59, 0x3c, 0x64, + 0x25, 0xdd, 0xbd, 0x13, 0x7c, 0x37, 0xf8, 0xd1, 0xb3, 0xe4, 0xa5, 0xe8, 0x1f, 0x42, 0x41, 0x5c, + 0xa9, 0x93, 0xe6, 0x1b, 0x2d, 0xaa, 0x25, 0xcd, 0x37, 0x76, 0x1f, 0x4f, 0x38, 0x19, 0x31, 0x58, + 0x7a, 0x75, 0x90, 0x79, 0x42, 0x40, 0x3e, 0x26, 0x7e, 0x1a, 0x64, 0x58, 
0x26, 0x4a, 0x83, 0x54, + 0xae, 0x6d, 0xa7, 0x42, 0x1e, 0x10, 0x5f, 0xb8, 0x94, 0xbc, 0x13, 0xa1, 0x14, 0x89, 0x6a, 0x50, + 0xc6, 0xa7, 0xb1, 0xa4, 0x1e, 0x66, 0x43, 0x54, 0x11, 0x91, 0xd1, 0x77, 0x01, 0xc2, 0xfb, 0x7f, + 0xfc, 0x7c, 0x92, 0x58, 0x44, 0x8c, 0x9f, 0x4f, 0x92, 0x4b, 0x08, 0x09, 0x81, 0x24, 0x04, 0xe7, + 0x07, 0x6a, 0x0a, 0xff, 0x53, 0x0d, 0xd0, 0x70, 0xbd, 0x00, 0xdd, 0x4f, 0x86, 0x48, 0xac, 0x4f, + 0xea, 0xaf, 0x9c, 0x8f, 0x39, 0x35, 0x88, 0x87, 0x7a, 0xb5, 0xd8, 0x90, 0xde, 0x4b, 0xaa, 0xd9, + 0x67, 0x1a, 0x54, 0x22, 0x15, 0x07, 0x74, 0x3b, 0x65, 0x9d, 0x63, 0x35, 0x4e, 0xfd, 0xce, 0x99, + 0x7c, 0xa9, 0x47, 0x38, 0x65, 0x57, 0xc8, 0xe3, 0xeb, 0x0f, 0x35, 0xa8, 0x46, 0xcb, 0x14, 0x28, + 0x05, 0x60, 0xa8, 0x50, 0xaa, 0x2f, 0x9c, 0xcd, 0x78, 0x8e, 0xd5, 0x0a, 0x4f, 0xb4, 0x1f, 0x42, + 0x41, 0x54, 0x37, 0x92, 0xdc, 0x22, 0x5a, 0x67, 0x4d, 0x72, 0x8b, 0x58, 0x69, 0x24, 0xcd, 0x2d, + 0x5c, 0xa7, 0x43, 0x14, 0x4f, 0x14, 0x35, 0x90, 0x34, 0xc8, 0xd3, 0x3d, 0x31, 0x56, 0x40, 0x39, + 0x15, 0x32, 0xf4, 0x44, 0x59, 0x01, 0x41, 0x29, 0x12, 0xcf, 0xf0, 0xc4, 0x78, 0x01, 0x25, 0xcd, + 0x13, 0x19, 0xaa, 0xe2, 0x89, 0x61, 0xc1, 0x22, 0xc9, 0x13, 0x87, 0xaa, 0xc8, 0x49, 0x9e, 0x38, + 0x5c, 0xf3, 0x48, 0x5b, 0x5b, 0x06, 0x1e, 0xf1, 0xc4, 0xe9, 0x84, 0x02, 0x07, 0x7a, 0x25, 0xc5, + 0xa6, 0x89, 0x15, 0x6a, 0xfd, 0xc1, 0x39, 0xb9, 0x4f, 0xf7, 0x00, 0xbe, 0x1a, 0xd2, 0x03, 0x7e, + 0xa9, 0xc1, 0x4c, 0x52, 0x85, 0x04, 0xa5, 0x80, 0xa5, 0x94, 0xb7, 0xf5, 0xc5, 0xf3, 0xb2, 0x9f, + 0xc3, 0x6e, 0x81, 0x4f, 0x3c, 0xaa, 0xfd, 0xf1, 0xcb, 0x39, 0xed, 0x2f, 0x5f, 0xce, 0x69, 0x7f, + 0xfb, 0x72, 0x4e, 0xfb, 0xd9, 0xdf, 0xe7, 0xc6, 0xf6, 0xf3, 0xec, 0xff, 0xe2, 0xbc, 0xfe, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x2a, 0x70, 0xa1, 0x12, 0x34, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto index a6cd00ab7c..37916c03f1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -191,6 +191,14 @@ service Maintenance { body: "*" }; } + + // MoveLeader requests current leader node to transfer its leadership to transferee. + rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/transfer-leadership" + body: "*" + }; + } } service Auth { @@ -380,7 +388,7 @@ message RangeRequest { // keys_only when set returns only the keys and not the values. bool keys_only = 8; - + // count_only when set returns only the count of the keys in the range. bool count_only = 9; @@ -469,6 +477,7 @@ message RequestOp { RangeRequest request_range = 1; PutRequest request_put = 2; DeleteRangeRequest request_delete_range = 3; + TxnRequest request_txn = 4; } } @@ -478,6 +487,7 @@ message ResponseOp { RangeResponse response_range = 1; PutResponse response_put = 2; DeleteRangeResponse response_delete_range = 3; + TxnResponse response_txn = 4; } } @@ -510,6 +520,10 @@ message Compare { // value is the value of the given key, in bytes. bytes value = 7; } + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + bytes range_end = 8; + // TODO: fill out with most of the rest of RangeRequest fields when needed. } // From google paxosdb paper: @@ -552,7 +566,7 @@ message TxnResponse { // CompactionRequest compacts the key-value store up to a given revision. 
All superseded keys // with a revision less than the compaction revision will be removed. message CompactionRequest { - // revision is the key-value store revision for the compaction operation. + // revision is the key-value store revision for the compaction operation. int64 revision = 1; // physical is set so the RPC will wait until the compaction is physically // applied to the local database such that compacted entries are totally @@ -648,7 +662,7 @@ message WatchResponse { // at a compacted index. // // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. + // catch up with the progress of the key-value store. // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. @@ -781,6 +795,15 @@ message DefragmentResponse { ResponseHeader header = 1; } +message MoveLeaderRequest { + // targetID is the node ID for the new leader. + uint64 targetID = 1; +} + +message MoveLeaderResponse { + ResponseHeader header = 1; +} + enum AlarmType { NONE = 0; // default, used to query if any alarm is active NOSPACE = 1; // space quota is exhausted diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go index 3b58b41543..12120beaef 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -52,7 +52,7 @@ func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listene if scheme != "https" && scheme != "unixs" { return l, nil } - return newTLSListener(l, tlsinfo) + return newTLSListener(l, tlsinfo, checkSAN) } type TLSInfo struct { @@ -61,6 +61,7 @@ type TLSInfo struct { CAFile string TrustedCAFile string ClientCertAuth bool + CRLFile string // ServerName ensures the cert matches the given host in case of discovery / virtual hosting ServerName string @@ -77,7 +78,7 @@ type TLSInfo struct { } func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth) + return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) } func (info TLSInfo) Empty() bool { diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go index ecc1245489..545e0c43db 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -19,21 +19,32 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "io/ioutil" "net" "sync" ) // tlsListener overrides a TLS listener so it will reject client -// certificates with insufficient SAN credentials. +// certificates with insufficient SAN credentials or CRL revoked +// certificates. type tlsListener struct { net.Listener connc chan net.Conn donec chan struct{} err error handshakeFailure func(*tls.Conn, error) + check tlsCheckFunc } -func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { +type tlsCheckFunc func(context.Context, *tls.Conn) error + +// NewTLSListener handshakes TLS connections and performs optional CRL checking. 
+func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + check := func(context.Context, *tls.Conn) error { return nil } + return newTLSListener(l, tlsinfo, check) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { if tlsinfo == nil || tlsinfo.Empty() { l.Close() return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) @@ -47,11 +58,27 @@ func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { if hf == nil { hf = func(*tls.Conn, error) {} } + + if len(tlsinfo.CRLFile) > 0 { + prevCheck := check + check = func(ctx context.Context, tlsConn *tls.Conn) error { + if err := prevCheck(ctx, tlsConn); err != nil { + return err + } + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + return checkCRL(tlsinfo.CRLFile, certs) + } + return nil + } + } + tlsl := &tlsListener{ Listener: tls.NewListener(l, tlscfg), connc: make(chan net.Conn), donec: make(chan struct{}), handshakeFailure: hf, + check: check, } go tlsl.acceptLoop() return tlsl, nil @@ -66,6 +93,15 @@ func (l *tlsListener) Accept() (net.Conn, error) { } } +func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + addr := tlsConn.RemoteAddr().String() + return checkCertSAN(ctx, certs[0], addr) + } + return nil +} + // acceptLoop launches each TLS handshake in a separate goroutine // to prevent a hanging TLS connection from blocking other connections. func (l *tlsListener) acceptLoop() { @@ -110,20 +146,16 @@ func (l *tlsListener) acceptLoop() { pendingMu.Lock() delete(pending, conn) pendingMu.Unlock() + if herr != nil { l.handshakeFailure(tlsConn, herr) return } - - st := tlsConn.ConnectionState() - if len(st.PeerCertificates) > 0 { - cert := st.PeerCertificates[0] - addr := tlsConn.RemoteAddr().String() - if cerr := checkCert(ctx, cert, addr); cerr != nil { - l.handshakeFailure(tlsConn, cerr) - return - } + if err := l.check(ctx, tlsConn); err != nil { + l.handshakeFailure(tlsConn, err) + return } + select { case l.connc <- tlsConn: conn = nil @@ -133,16 +165,43 @@ func (l *tlsListener) acceptLoop() { } } -func checkCert(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { - h, _, herr := net.SplitHostPort(remoteAddr) +func checkCRL(crlPath string, cert []*x509.Certificate) error { + // TODO: cache + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + certList, err := x509.ParseCRL(crlBytes) + if err != nil { + return err + } + revokedSerials := make(map[string]struct{}) + for _, rc := range certList.TBSCertList.RevokedCertificates { + revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} + } + for _, c := range cert { + serial := string(c.SerialNumber.Bytes()) + if _, ok := revokedSerials[serial]; ok { + return fmt.Errorf("transport: certificate serial %x revoked", serial) + } + } + return nil +} + +func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { return nil } + h, _, herr := net.SplitHostPort(remoteAddr) if herr != nil { return herr } if len(cert.IPAddresses) > 0 { - if cerr := cert.VerifyHostname(h); cerr != nil && len(cert.DNSNames) == 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { return cerr } } diff --git 
a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go index accbf6a510..9134cebd35 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.2.0-rc.1+git" + Version = "3.2.0+git" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/denisenkom/go-mssqldb/README.md b/vendor/github.com/denisenkom/go-mssqldb/README.md index 11e0a6852e..64177bda69 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/README.md +++ b/vendor/github.com/denisenkom/go-mssqldb/README.md @@ -1,5 +1,9 @@ # A pure Go MSSQL driver for Go's database/sql package +[![GoDoc](https://godoc.org/github.com/denisenkom/go-mssqldb?status.svg)](http://godoc.org/github.com/denisenkom/go-mssqldb) +[![Build status](https://ci.appveyor.com/api/projects/status/ujv21jd241h8o5s7?svg=true)](https://ci.appveyor.com/project/denisenk/go-mssqldb) +[![codecov](https://codecov.io/gh/denisenkom/go-mssqldb/branch/master/graph/badge.svg)](https://codecov.io/gh/denisenkom/go-mssqldb) + ## Install go get github.com/denisenkom/go-mssqldb diff --git a/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml b/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml index 0c43e5e084..0155dce0bd 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml +++ b/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml @@ -44,7 +44,9 @@ before_test: Start-Service "SQLBrowser" - sqlcmd -S "(local)\%SQLINSTANCE%" -Q "Use [master]; CREATE DATABASE test;" - sqlcmd -S "(local)\%SQLINSTANCE%" -h -1 -Q "set nocount on; Select @@version" + - pip install codecov test_script: - - go test + - go test -race -coverprofile=coverage.txt -covermode=atomic + - codecov -f coverage.txt diff --git a/vendor/github.com/denisenkom/go-mssqldb/types.go b/vendor/github.com/denisenkom/go-mssqldb/types.go index 46de0650b8..295de8034b 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/types.go +++ b/vendor/github.com/denisenkom/go-mssqldb/types.go @@ -923,6 +923,10 @@ func makeGoLangScanType(ti typeInfo) reflect.Type { default: panic("invalid size of MONEYN") } + case typeDateTim4: + return reflect.TypeOf(time.Time{}) + case typeDateTime: + return reflect.TypeOf(time.Time{}) case typeDateTimeN: switch ti.Size { case 4: diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 0ce2c94303..18a1263f10 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // CheckpointCreateOptions holds parameters to create a checkpoint from a container @@ -178,6 +178,11 @@ type ImageBuildOptions struct { SecurityOpt []string ExtraHosts []string // List of extra hosts Target string + SessionID string + + // TODO @jhowardmsft LCOW Support: This will require extending to include + // `Platform string`, but is ommited for now as it's hard-coded temporarily + // to avoid API changes. 
} // ImageBuildResponse holds information diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 20c19f2132..e4d2ce6e36 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -16,6 +16,7 @@ type ContainerCreateConfig struct { HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig AdjustCPUShares bool + Platform string } // ContainerRmConfig holds arguments for the container remove diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 0000000000..47ae234ef3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 0000000000..1fdc9b0436 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + 
dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto 
b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 0000000000..06eb7ba650 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go index fdb2388888..91f3578428 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -12,7 +12,8 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` + Data []byte `json:",omitempty"` + Driver *Driver `json:"omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store } // SecretReferenceFileTarget is a file target in a secret reference diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 5b74f14b11..b65fa86dac 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -2,7 +2,7 @@ package swarm import "time" -// ClusterInfo represents info about the cluster for outputing in "info" +// ClusterInfo represents info about the cluster for outputting in "info" // it contains the same information as "Swarm", but without the JoinTokens type ClusterInfo struct { ID string diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index a598a79d59..1712c06cf6 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -1,6 +1,10 @@ package swarm -import "time" +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) // TaskState represents the state of a task. type TaskState string @@ -51,7 +55,11 @@ type Task struct { // TaskSpec represents the spec of a task. type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` + // ContainerSpec and PluginSpec are mutually exclusive. 
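Editor's note: ContainerSpec switches from a value to a pointer in this hunk, so any caller that previously embedded it by value needs updating; PluginSpec is the new, mutually exclusive alternative for plugin runtimes. A hedged migration sketch (the image name is illustrative):

package main

import "github.com/docker/docker/api/types/swarm"

func main() {
	// Before this bump the field was a value:
	//   spec := swarm.TaskSpec{ContainerSpec: swarm.ContainerSpec{Image: "alpine:3.6"}}
	// After it, callers allocate the spec they actually use and leave the other nil.
	spec := swarm.TaskSpec{
		ContainerSpec: &swarm.ContainerSpec{Image: "alpine:3.6"}, // hypothetical image
	}
	_ = spec // a plugin-backed task would instead set PluginSpec (see runtime.PluginSpec above)
}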
+ // PluginSpec will only be used when the `Runtime` field is set to `plugin` + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` Placement *Placement `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 37adca2f57..c96df27332 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -45,6 +45,12 @@ type ImageInspect struct { VirtualSize int64 GraphDriver GraphDriverData RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` } // Container contains response of Engine API: @@ -320,6 +326,7 @@ type ContainerJSONBase struct { Name string RestartCount int Driver string + Platform string MountLabel string ProcessLabel string AppArmorProfile string @@ -488,10 +495,11 @@ type Runtime struct { // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuilderSize int64 } // ContainersPruneReport contains the response for Engine API: @@ -515,6 +523,12 @@ type ImagesPruneReport struct { SpaceReclaimed uint64 } +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + SpaceReclaimed uint64 +} + // NetworksPruneReport contains the response for Engine API: // POST "/networks/prune" type NetworksPruneReport struct { diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go index fb03b50111..1095063977 100644 --- a/vendor/github.com/docker/docker/opts/ip.go +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { } // Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. +// string is not parsable as an IP address it returns an error. func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 5fb774d602..6cbc2e2bd2 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -93,6 +93,16 @@ const ( OverlayWhiteoutFormat ) +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { @@ -180,7 +190,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. 
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) @@ -305,20 +315,14 @@ func (compression *Compression) Extension() string { // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. -func FileInfoHeader(path, name string, fi os.FileInfo) (*tar.Header, error) { - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return nil, err - } - } +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) @@ -327,12 +331,43 @@ func FileInfoHeader(path, name string, fi os.FileInfo) (*tar.Header, error) { if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { return nil, err } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability) } - return hdr, nil + return nil } type tarWhiteoutConverter interface { @@ -386,10 +421,22 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } - hdr, err := FileInfoHeader(path, name, fi) + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly @@ -1035,7 +1082,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index f072534d52..6e950e93cf 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ 
b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -5,9 +5,9 @@ import ( "os" "path/filepath" "strings" - "syscall" "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" ) func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { @@ -67,7 +67,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay if base == WhiteoutOpaqueDir { - err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) // don't write the file itself return false, err } @@ -77,7 +77,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) - if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { return false, err } if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index 33ee88766f..1213174322 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" ) // fixVolumePathPrefix does platform specific processing to ensure that if @@ -45,16 +46,13 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { s, ok := stat.(*syscall.Stat_t) - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } } return @@ -63,13 +61,10 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( func getInodeFromStat(stat interface{}) (inode uint64, err error) { s, ok := stat.(*syscall.Stat_t) - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return + if ok { + inode = uint64(s.Ino) } - inode = uint64(s.Ino) - return } @@ -101,11 +96,11 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: - mode |= syscall.S_IFBLK + mode |= unix.S_IFBLK case tar.TypeChar: - mode |= syscall.S_IFCHR + mode |= unix.S_IFCHR case tar.TypeFifo: - mode |= syscall.S_IFIFO + mode |= unix.S_IFIFO } return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go index 771860294a..b987e52245 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -10,6 +10,7 @@ import ( "unsafe" 
"github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" ) // walker is used to implement collectFileInfoForChanges on linux. Where this @@ -233,7 +234,7 @@ func readdirnames(dirname string) (names []nameIno, err error) { // Refill the buffer if necessary if bufp >= nbuf { bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux if nbuf < 0 { nbuf = 0 } @@ -255,12 +256,12 @@ func readdirnames(dirname string) (names []nameIno, err error) { return sl, nil } -// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// parseDirent is a minor modification of unix.ParseDirent (linux version) // which returns {name,inode} pairs instead of just names. func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { origlen := len(buf) for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. continue diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go index 3778b732cf..98e2b39aea 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -7,6 +7,7 @@ import ( "syscall" "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" ) func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { @@ -16,7 +17,7 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { oldStat.GID() != newStat.GID() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { return true } @@ -24,7 +25,7 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 } func getIno(fi os.FileInfo) uint64 { diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 0614c67cec..5281e29d18 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -332,6 +332,9 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read } hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } if err = rebasedTar.WriteHeader(hdr); err != nil { w.CloseWithError(err) diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index d20854e2a2..a2766b5928 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -84,7 +84,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) + err = system.MkdirAll(parentPath, 0600, "") if err != nil { return 0, err } diff --git 
a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go similarity index 77% rename from vendor/github.com/docker/docker/pkg/homedir/homedir.go rename to vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index 8154e83f0c..f2a20ea8f8 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -1,8 +1,9 @@ +// +build !windows + package homedir import ( "os" - "runtime" "github.com/opencontainers/runc/libcontainer/user" ) @@ -10,9 +11,6 @@ import ( // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } return "HOME" } @@ -21,7 +19,7 @@ func Key() string { // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { + if home == "" { if u, err := user.CurrentUser(); err == nil { return u.Home } @@ -32,8 +30,5 @@ func Get() string { // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } return "~" } diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 0000000000..fafdb2bbf9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 0b28249fa8..8701bb7fa9 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -49,7 +49,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown paths = append(paths, dirPath) } } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { return err } } else { diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index 8ed8353060..45d2878e38 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -11,7 +11,7 @@ import ( // Platforms such as Windows do not support the UID/GID concept. So make this // just a wrapper around system.MkdirAll. 
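Editor's note: homedir.go becomes homedir_unix.go (guarded by a `// +build !windows` tag) plus the new homedir_windows.go above, trading runtime.GOOS checks for per-platform files. Callers are unchanged; a small cross-platform sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// Resolves via $HOME (unix, falling back to the libcontainer user database)
	// or %USERPROFILE% (windows); the build tags pick the implementation at
	// compile time, so no runtime.GOOS branching is needed at call sites.
	fmt.Println(homedir.Get())
}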
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { return err } return nil diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go index 25f466183e..0425d0dd63 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -1,87 +1,87 @@ package mount import ( - "syscall" + "golang.org/x/sys/unix" ) const ( // RDONLY will mount the file system read-only. - RDONLY = syscall.MS_RDONLY + RDONLY = unix.MS_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. - NOSUID = syscall.MS_NOSUID + NOSUID = unix.MS_NOSUID // NODEV will not interpret character or block special devices on the file // system. - NODEV = syscall.MS_NODEV + NODEV = unix.MS_NODEV // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = syscall.MS_NOEXEC + NOEXEC = unix.MS_NOEXEC // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = syscall.MS_SYNCHRONOUS + SYNCHRONOUS = unix.MS_SYNCHRONOUS // DIRSYNC will force all directory updates within the file system to be done // synchronously. This affects the following system calls: create, link, // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = syscall.MS_DIRSYNC + DIRSYNC = unix.MS_DIRSYNC // REMOUNT will attempt to remount an already-mounted file system. This is // commonly used to change the mount flags for a file system, especially to // make a readonly file system writeable. It does not change device or mount // point. - REMOUNT = syscall.MS_REMOUNT + REMOUNT = unix.MS_REMOUNT // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = syscall.MS_MANDLOCK + MANDLOCK = unix.MS_MANDLOCK // NOATIME will not update the file access time when reading from a file. - NOATIME = syscall.MS_NOATIME + NOATIME = unix.MS_NOATIME // NODIRATIME will not update the directory access time. - NODIRATIME = syscall.MS_NODIRATIME + NODIRATIME = unix.MS_NODIRATIME // BIND remounts a subtree somewhere else. - BIND = syscall.MS_BIND + BIND = unix.MS_BIND // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = syscall.MS_BIND | syscall.MS_REC + RBIND = unix.MS_BIND | unix.MS_REC // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = syscall.MS_UNBINDABLE + UNBINDABLE = unix.MS_UNBINDABLE // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = syscall.MS_PRIVATE + PRIVATE = unix.MS_PRIVATE // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC // SLAVE creates a mount which receives propagation from its master, but not // vice versa. - SLAVE = syscall.MS_SLAVE + SLAVE = unix.MS_SLAVE // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + RSLAVE = unix.MS_SLAVE | unix.MS_REC // SHARED creates a mount which provides the ability to create mirrors of // that mount such that mounts and unmounts within any of the mirrors // propagate to the other mirrors. 
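Editor's note: this file (and most of the pkg/mount and pkg/system changes that follow) swaps the frozen stdlib `syscall` package for `golang.org/x/sys/unix`. The constant values are identical, so mount flags computed either way agree; a Linux-only sketch:

// +build linux

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// x/sys/unix mirrors the kernel constants the old syscall package exposed,
	// so the migration does not change any flag values.
	oldFlags := uintptr(syscall.MS_BIND | syscall.MS_RDONLY)
	newFlags := uintptr(unix.MS_BIND | unix.MS_RDONLY)
	fmt.Println(oldFlags == newFlags) // true
}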
- SHARED = syscall.MS_SHARED + SHARED = unix.MS_SHARED // RSHARED marks the entire mount tree as SHARED. - RSHARED = syscall.MS_SHARED | syscall.MS_REC + RSHARED = unix.MS_SHARED | unix.MS_REC // RELATIME updates inode access times relative to modify or change time. - RELATIME = syscall.MS_RELATIME + RELATIME = unix.MS_RELATIME // STRICTATIME allows to explicitly request full atime updates. This makes // it possible for the kernel to default to relatime or noatime but still // allow userspace to override it. - STRICTATIME = syscall.MS_STRICTATIME + STRICTATIME = unix.MS_STRICTATIME - mntDetach = syscall.MNT_DETACH + mntDetach = unix.MNT_DETACH ) diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go index bb870e6f59..814896cc9e 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -13,8 +13,9 @@ import "C" import ( "fmt" "strings" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func allocateIOVecs(options []string) []C.struct_iovec { @@ -55,5 +56,5 @@ func mount(device, target, mType string, flag uintptr, data string) error { } func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) + return unix.Unmount(target, flag) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go index 3ef2ce6f0d..39c36d472a 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -1,18 +1,18 @@ package mount import ( - "syscall" + "golang.org/x/sys/unix" ) const ( // ptypes is the set propagation types. - ptypes = syscall.MS_SHARED | syscall.MS_PRIVATE | syscall.MS_SLAVE | syscall.MS_UNBINDABLE + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE // pflags is the full set valid flags for a change propagation call. - pflags = ptypes | syscall.MS_REC | syscall.MS_SILENT + pflags = ptypes | unix.MS_REC | unix.MS_SILENT // broflags is the combination of bind and read only - broflags = syscall.MS_BIND | syscall.MS_RDONLY + broflags = unix.MS_BIND | unix.MS_RDONLY ) // isremount returns true if either device name or flags identify a remount request, false otherwise. @@ -20,7 +20,7 @@ func isremount(device string, flags uintptr) bool { switch { // We treat device "" and "none" as a remount request to provide compatibility with // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. - case flags&syscall.MS_REMOUNT != 0, device == "", device == "none": + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": return true default: return false @@ -29,28 +29,29 @@ func isremount(device string, flags uintptr) bool { func mount(device, target, mType string, flags uintptr, data string) error { oflags := flags &^ ptypes - if !isremount(device, flags) { - // Initial call applying all non-propagation flags. - if err := syscall.Mount(device, target, mType, oflags, data); err != nil { + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { return err } } if flags&ptypes != 0 { // Change the propagation type. 
- if err := syscall.Mount("", target, "", flags&pflags, ""); err != nil { + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { return err } } if oflags&broflags == broflags { // Remount the bind to apply read only. - return syscall.Mount("", target, "", oflags|syscall.MS_REMOUNT, "") + return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") } return nil } func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) + return unix.Unmount(target, flag) } diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go index 7637f12e1a..056d19954d 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -2,26 +2,9 @@ package system import ( "os" - "syscall" "time" - "unsafe" ) -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go index 2945868465..45428c141c 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -3,25 +3,26 @@ package system import ( - "syscall" "time" + + "golang.org/x/sys/windows" ) //setCTime will set the create time on a file. On Windows, this requires //calling SetFileTime and explicitly including the create time. func setCTime(path string, ctime time.Time) error { - ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) - pathp, e := syscall.UTF16PtrFromString(path) + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) if e != nil { return e } - h, e := syscall.CreateFile(pathp, - syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, - syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) if e != nil { return e } - defer syscall.Close(h) - c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) - return syscall.SetFileTime(h, &c, nil, nil) + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) } diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go index 3ec6d22151..192e367882 100644 --- a/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go @@ -19,8 +19,8 @@ var ( ) // CreateEvent implements win32 CreateEventW func in golang. It will create an event object. 
-func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) +func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) { + namep, _ := windows.UTF16PtrFromString(name) var _p1 uint32 if manualReset { _p1 = 1 @@ -31,45 +31,45 @@ func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, } r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { err = e1 } return } // OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) { + namep, _ := windows.UTF16PtrFromString(name) var _p1 uint32 if inheritHandle { _p1 = 1 } r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { err = e1 } return } // SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { +func SetEvent(handle windows.Handle) (err error) { return setResetPulse(handle, procSetEvent) } // ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { +func ResetEvent(handle windows.Handle) (err error) { return setResetPulse(handle, procResetEvent) } // PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle syscall.Handle) (err error) { +func PulseEvent(handle windows.Handle) (err error) { return setResetPulse(handle, procPulseEvent) } -func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) { +func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) { r0, _, _ := proc.Call(uintptr(handle)) if r0 != 0 { err = syscall.Errno(r0) diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go index 7aa920de1a..102565f760 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -8,15 +8,14 @@ import ( "path/filepath" ) -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return MkdirAll(path, perm) +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) } // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. 
-func MkdirAll(path string, perm os.FileMode) error { +func MkdirAll(path string, perm os.FileMode, sddl string) error { return os.MkdirAll(path, perm) } diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 626d2ad886..a61b53d0ba 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -14,23 +14,31 @@ import ( "unsafe" winio "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) } // MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) } // mkdirall is a custom version of os.MkdirAll modified for use on Windows // so that it is both volume path aware, and can create a directory with // a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { +func mkdirall(path string, applyACL bool, sddl string) error { if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { return nil } @@ -64,15 +72,15 @@ func mkdirall(path string, adminAndLocalSystem bool) error { if j > 1 { // Create parent - err = mkdirall(path[0:j-1], false) + err = mkdirall(path[0:j-1], false, sddl) if err != nil { return err } } // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) + if applyACL { + err = mkdirWithACL(path, sddl) } else { err = os.Mkdir(path, 0) } @@ -92,13 +100,12 @@ func mkdirall(path string, adminAndLocalSystem bool) error { // mkdirWithACL creates a new directory. If there is an error, it will be of // type *PathError. . // -// This is a modified and combined version of os.Mkdir and syscall.Mkdir +// This is a modified and combined version of os.Mkdir and windows.Mkdir // in golang to cater for creating a directory am ACL permitting full // access, with inheritance, to any subfolder/file for Built-in Administrators // and Local System. 
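Editor's note: system.MkdirAll and MkdirAllWithACL now take a third SDDL argument, which is why every call site touched in this patch grows a trailing "". On unix the argument is ignored; on Windows, MkdirAllWithACL applies it as the directory's DACL (see the Sddl* constants above). A hedged caller-side sketch; the path is illustrative:

package main

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Plain callers pass an empty SDDL string and behave exactly as before.
	if err := system.MkdirAll("/tmp/example-dir", 0700, ""); err != nil && !os.IsExist(err) {
		panic(err)
	}
	// Windows callers that need the Administrators/LocalSystem ACL would instead use
	// system.MkdirAllWithACL(path, 0700, system.SddlAdministratorsLocalSystem).
}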
-func mkdirWithACL(name string) error { - sa := syscall.SecurityAttributes{Length: 0} - sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} sd, err := winio.SddlToSecurityDescriptor(sddl) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} @@ -107,12 +114,12 @@ func mkdirWithACL(name string) error { sa.InheritHandle = 1 sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - namep, err := syscall.UTF16PtrFromString(name) + namep, err := windows.UTF16PtrFromString(name) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } - e := syscall.CreateDirectory(namep, &sa) + e := windows.CreateDirectory(namep, &sa) if e != nil { return &os.PathError{Op: "mkdir", Path: name, Err: e} } @@ -135,7 +142,7 @@ func IsAbs(path string) bool { return true } -// The origin of the functions below here are the golang OS and syscall packages, +// The origin of the functions below here are the golang OS and windows packages, // slightly modified to only cope with files, not directories due to the // specific use case. // @@ -167,74 +174,74 @@ func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) if name == "" { return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} } - r, errf := syscallOpenFileSequential(name, flag, 0) + r, errf := windowsOpenFileSequential(name, flag, 0) if errf == nil { return r, nil } return nil, &os.PathError{Op: "open", Path: name, Err: errf} } -func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0) +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) if e != nil { return nil, e } return os.NewFile(uintptr(r), name), nil } -func makeInheritSa() *syscall.SecurityAttributes { - var sa syscall.SecurityAttributes +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 return &sa } -func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) { +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { if len(path) == 0 { - return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND } - pathp, err := syscall.UTF16PtrFromString(path) + pathp, err := windows.UTF16PtrFromString(path) if err != nil { - return syscall.InvalidHandle, err + return windows.InvalidHandle, err } var access uint32 - switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { - case syscall.O_RDONLY: - access = syscall.GENERIC_READ - case syscall.O_WRONLY: - access = syscall.GENERIC_WRITE - case syscall.O_RDWR: - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE } - if mode&syscall.O_CREAT != 0 { - access |= syscall.GENERIC_WRITE + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE } - if mode&syscall.O_APPEND != 0 { - access &^= syscall.GENERIC_WRITE - access |= syscall.FILE_APPEND_DATA + if mode&windows.O_APPEND != 0 { + access &^= 
windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA } - sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) - var sa *syscall.SecurityAttributes - if mode&syscall.O_CLOEXEC == 0 { + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { sa = makeInheritSa() } var createmode uint32 switch { - case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): - createmode = syscall.CREATE_NEW - case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): - createmode = syscall.CREATE_ALWAYS - case mode&syscall.O_CREAT == syscall.O_CREAT: - createmode = syscall.OPEN_ALWAYS - case mode&syscall.O_TRUNC == syscall.O_TRUNC: - createmode = syscall.TRUNCATE_EXISTING + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING default: - createmode = syscall.OPEN_EXISTING + createmode = windows.OPEN_EXISTING } // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) return h, e } diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 0000000000..17935088de --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 0000000000..019c66441c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go new file mode 100644 index 0000000000..cff33bb408 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. 
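Editor's note: Linux Containers on Windows (LCOW) support arrives here as an opt-in flag: init_windows.go reads the LCOW_SUPPORTED environment variable, and the unix build that follows always reports false. A hedged sketch of how a caller branches on it:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// True only on Windows builds with LCOW_SUPPORTED set in the environment
	// (see init_windows.go); always false on every other platform.
	if system.LCOWSupported() {
		fmt.Println("linux containers on windows enabled")
	} else {
		fmt.Println("lcow disabled")
	}
}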
+func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go new file mode 100644 index 0000000000..e54d01e696 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 62fda5bacf..bd23c4d50b 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -2,7 +2,9 @@ package system -import "syscall" +import ( + "syscall" +) // Lstat takes a path to a file and returns // a system.StatT type pertaining to that file. diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index 73958182b4..af79a65383 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -3,13 +3,13 @@ package system import ( - "syscall" + "golang.org/x/sys/unix" ) // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) + return unix.Mknod(path, mode, dev) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 0000000000..f634a6be67 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go index c607c4db09..f3762e69d3 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -2,11 +2,6 @@ package system -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. This is a no-op on Linux. 
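Editor's note: DefaultPathEnv changes from two per-platform constants to a single function keyed on the target platform, so a Windows daemon building a Linux (LCOW) container still gets the unix-style PATH. A small sketch; the arguments are the usual GOOS-style platform strings:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// On a Linux host both calls return the unix default PATH. On a Windows host,
	// "linux" returns it only when LCOWSupported() is true, and "windows" returns ""
	// because the container image is expected to supply its own PATH.
	fmt.Println(system.DefaultPathEnv("linux"))
	fmt.Println(system.DefaultPathEnv("windows"))
}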
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go index cbfe2c1576..aab891522d 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -8,15 +8,11 @@ import ( "strings" ) -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. // If a drive letter is supplied, it must be the system drive. The drive letter // is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with +// need the path in this syntax so that it can ultimately be concatenated with // a Windows long-path which doesn't support drive-letters. Examples: // C: --> Fail // C:\ --> \ diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go index c99d796d13..26c8b42c17 100644 --- a/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -4,12 +4,14 @@ package system import ( "syscall" + + "golang.org/x/sys/unix" ) // IsProcessAlive returns true if process with a given pid is running. func IsProcessAlive(pid int) bool { - err := syscall.Kill(pid, syscall.Signal(0)) - if err == nil || err == syscall.EPERM { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { return true } @@ -18,5 +20,5 @@ func IsProcessAlive(pid int) bool { // KillProcess force-stops a process. func KillProcess(pid int) { - syscall.Kill(pid, syscall.SIGKILL) + unix.Kill(pid, unix.SIGKILL) } diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go index ca0621f04f..101b569a56 100644 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ b/vendor/github.com/docker/docker/pkg/system/rm.go @@ -20,7 +20,7 @@ import ( // These types of errors do not need to be returned since it's ok for the dir to // be gone we can just retry the remove operation. // -// This should not return a `os.ErrNotExist` kind of error under any cirucmstances +// This should not return a `os.ErrNotExist` kind of error under any circumstances func EnsureRemoveAll(dir string) error { notExistErr := make(map[string]bool) diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go index 6208e74606..91c7d121cc 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -2,7 +2,9 @@ package system -import "syscall" +import ( + "syscall" +) // StatT type contains status of a file. It contains metadata // like permission, owner, group, size, etc about a file. 
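Note on the pkg/system path changes above: the new path.go and init_windows.go make the default PATH platform-aware. Windows containers on Windows keep an empty default (the image supplies its own PATH), while Linux containers on Windows (LCOW, gated for now by the LCOW_SUPPORTED environment variable) and Unix daemons get the usual Unix search path. A minimal standalone sketch of that decision tree follows; it is illustrative only, using local stand-ins for the vendored DefaultPathEnv and LCOWSupported rather than the package itself.

package main

import (
	"fmt"
	"os"
	"runtime"
)

// defaultUnixPathEnv mirrors the constant added in pkg/system/path.go.
const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

// lcowSupported mirrors the flag set up in init_windows.go: it is only
// enabled when the LCOW_SUPPORTED environment variable is non-empty.
var lcowSupported = os.Getenv("LCOW_SUPPORTED") != ""

// defaultPathEnv follows the same branching as the new DefaultPathEnv(platform):
// on a Windows daemon, a container whose platform differs from the host (LCOW)
// gets the Unix search path; native Windows containers get an empty default,
// and everything else gets the Unix default.
func defaultPathEnv(platform string) string {
	if runtime.GOOS == "windows" {
		if platform != runtime.GOOS && lcowSupported {
			return defaultUnixPathEnv
		}
		return ""
	}
	return defaultUnixPathEnv
}

func main() {
	// On Linux this prints the Unix path twice; on Windows the first line is
	// the Unix path only when LCOW_SUPPORTED is set, and the second is empty.
	fmt.Println(defaultPathEnv("linux"))
	fmt.Println(defaultPathEnv(runtime.GOOS))
}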
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go index 3ae9128468..49dbdd3781 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -2,12 +2,12 @@ package system -import "syscall" +import "golang.org/x/sys/unix" // Unmount is a platform-specific helper function to call // the unmount syscall. func Unmount(dest string) error { - return syscall.Unmount(dest, 0) + return unix.Unmount(dest, 0) } // CommandLineToArgv should not be used on Unix. diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index c328a6fb86..eded233b9c 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -1,14 +1,14 @@ package system import ( - "syscall" "unsafe" "github.com/Sirupsen/logrus" + "golang.org/x/sys/windows" ) var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") procGetVersionExW = modkernel32.NewProc("GetVersionExW") procGetProductInfo = modkernel32.NewProc("GetProductInfo") ) @@ -42,7 +42,7 @@ type osVersionInfoEx struct { func GetOSVersion() OSVersion { var err error osv := OSVersion{} - osv.Version, err = syscall.GetVersion() + osv.Version, err = windows.GetVersion() if err != nil { // GetVersion never fails. panic(err) @@ -93,20 +93,20 @@ func Unmount(dest string) error { func CommandLineToArgv(commandLine string) ([]string, error) { var argc int32 - argsPtr, err := syscall.UTF16PtrFromString(commandLine) + argsPtr, err := windows.UTF16PtrFromString(commandLine) if err != nil { return nil, err } - argv, err := syscall.CommandLineToArgv(argsPtr, &argc) + argv, err := windows.CommandLineToArgv(argsPtr, &argc) if err != nil { return nil, err } - defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) newArgs := make([]string, argc) for i, v := range (*argv)[:argc] { - newArgs[i] = string(syscall.UTF16ToString((*v)[:])) + newArgs[i] = string(windows.UTF16ToString((*v)[:])) } return newArgs, nil diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go index 3d0146b01a..5a10eda5af 100644 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -3,11 +3,11 @@ package system import ( - "syscall" + "golang.org/x/sys/unix" ) // Umask sets current process's file mode creation mask to newmask // and returns oldmask. func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil + return unix.Umask(newmask), nil } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go index e2eac3b553..6a77524376 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -3,18 +3,20 @@ package system import ( "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. 
+// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { var _path *byte - _path, err := syscall.BytePtrFromString(path) + _path, err := unix.BytePtrFromString(path) if err != nil { return err } - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { return err } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go index fc8a1aba95..edc588a63f 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -3,22 +3,21 @@ package system import ( "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - atFdCwd := -100 - atSymLinkNoFollow := 0x100 + atFdCwd := unix.AT_FDCWD var _path *byte - _path, err := syscall.BytePtrFromString(path) + _path, err := unix.BytePtrFromString(path) if err != nil { return err } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { return err } diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index d2e2c05799..98b111be42 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -1,63 +1,29 @@ package system -import ( - "syscall" - "unsafe" -) +import "golang.org/x/sys/unix" // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It will returns a nil slice and nil error if the xattr is not set. 
func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { + sz, errno := unix.Lgetxattr(path, attr, dest) + if errno == unix.ENODATA { return nil, nil } - if errno == syscall.ERANGE { + if errno == unix.ERANGE { dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + sz, errno = unix.Lgetxattr(path, attr, dest) } - if errno != 0 { + if errno != nil { return nil, errno } return dest[:sz], nil } -var _zero uintptr - // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil + return unix.Lsetxattr(path, attr, data, flags) } diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go index fd023ba310..c0332c3cdb 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -6,10 +6,10 @@ import ( "io" "os" "os/signal" + "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE "github.com/Azure/go-ansiterm/winterm" "github.com/docker/docker/pkg/term/windows" - "golang.org/x/sys/windows" ) // State holds the console mode for the terminal. @@ -78,20 +78,24 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { emulateStderr = false } + // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and + // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as + // go-ansiterm hasn't switch to x/sys/windows. 
+ // TODO: switch back to x/sys/windows once go-ansiterm has switched if emulateStdin { - stdIn = windowsconsole.NewAnsiReader(windows.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) } else { stdIn = os.Stdin } if emulateStdout { - stdOut = windowsconsole.NewAnsiWriter(windows.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) } else { stdOut = os.Stdout } if emulateStderr { - stdErr = windowsconsole.NewAnsiWriter(windows.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) } else { stdErr = os.Stderr } diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS index b1ba43b86f..0714a80668 100644 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -45,6 +45,7 @@ Dan Williams Daniel, Dao Quang Minh Daniel Garcia Daniel Hiltgen +Daniel Tsui Darren Shepherd Dave Choi David Huie @@ -76,6 +77,7 @@ He Simei Ivan Mikushin James Bardin James Nugent +Jamie Snell Januar Wayong Jari Kolehmainen Jason Wilder @@ -87,6 +89,7 @@ Jen Andre Jérôme Laurens Jim Minter Johan Euphrosine +Johannes Scheuermann John Hughes Jorge Marey Julian Einwag diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml index 85a2c94e10..9f1f2b2edc 100644 --- a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml +++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml @@ -6,7 +6,8 @@ environment: GOPATH: c:\gopath matrix: - GOVERSION: 1.7.5 - - GOVERSION: 1.8.1 + - GOVERSION: 1.8.3 + - GOVERSION: 1.9beta2 install: - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - rmdir c:\go /s /q diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index af1cd41ca6..ea55323eec 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -204,7 +204,7 @@ func (s *State) StateString() string { // PortBinding represents the host/container port mapping as returned in the // `docker inspect` json type PortBinding struct { - HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty" toml:"HostIP,omitempty"` + HostIP string `json:"HostIp,omitempty" yaml:"HostIp,omitempty" toml:"HostIp,omitempty"` HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty" toml:"HostPort,omitempty"` } diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go index b935c6893a..0048153096 100644 --- a/vendor/github.com/fsouza/go-dockerclient/exec.go +++ b/vendor/github.com/fsouza/go-dockerclient/exec.go @@ -6,6 +6,7 @@ package docker import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -29,6 +30,7 @@ type CreateExecOptions struct { AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty" toml:"AttachStdout,omitempty"` AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty" toml:"AttachStderr,omitempty"` Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty" toml:"Tty,omitempty"` + Env []string `json:"Env,omitempty" yaml:"Env,omitempty" toml:"Env,omitempty"` Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty" toml:"Cmd,omitempty"` Container string `json:"Container,omitempty" yaml:"Container,omitempty" toml:"Container,omitempty"` User string `json:"User,omitempty" yaml:"User,omitempty" toml:"User,omitempty"` @@ -41,6 +43,9 @@ type 
CreateExecOptions struct { // // See https://goo.gl/60TeBP for more details func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { + if len(opts.Env) > 0 && c.serverAPIVersion.LessThan(apiVersion125) { + return nil, errors.New("exec configuration Env is only supported in API#1.25 and above") + } path := fmt.Sprintf("/containers/%s/exec", opts.Container) resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context}) if err != nil { diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 0bd6c505b6..f8827ddd2b 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -37,7 +37,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.28.0" + _VERSION = "1.28.1" ) // Version returns current package version literal. diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index d522e003a1..eeb8dabaac 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -408,10 +408,11 @@ func isEmptyValue(v reflect.Value) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflectTime: - return v.Interface().(time.Time).IsZero() case reflect.Interface, reflect.Ptr: return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() } return false } diff --git a/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/go-ldap/ldap/dn.go index a8ece31427..857b2ca73f 100644 --- a/vendor/github.com/go-ldap/ldap/dn.go +++ b/vendor/github.com/go-ldap/ldap/dn.go @@ -143,6 +143,9 @@ func ParseDN(str string) (*DN, error) { } } else if char == ',' || char == '+' { // We're done with this RDN or value, push it + if len(attribute.Type) == 0 { + return nil, errors.New("incomplete type, value pair") + } attribute.Value = stringFromBuffer() rdn.Attributes = append(rdn.Attributes, attribute) attribute = new(AttributeTypeAndValue) diff --git a/vendor/github.com/go-ldap/ldap/filter.go b/vendor/github.com/go-ldap/ldap/filter.go index 7eae310f16..3858a2865c 100644 --- a/vendor/github.com/go-ldap/ldap/filter.go +++ b/vendor/github.com/go-ldap/ldap/filter.go @@ -82,7 +82,10 @@ func CompileFilter(filter string) (*ber.Packet, error) { if err != nil { return nil, err } - if pos != len(filter) { + switch { + case pos > len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + case pos < len(filter): return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:]))) } return packet, nil diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index 10e2ebb94c..5526e3e900 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -33,6 +33,7 @@ ICHINOSE Shogo INADA Naoki Jacek Szwec James Harr +Jeff Hodges Jian Zhen Joshua Prunier Julien Lefevre diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go index f11e14462a..c341b66808 100644 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -66,11 +66,6 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { mc.parseTime = mc.cfg.ParseTime mc.strict = mc.cfg.Strict - // Call startWatcher for context support (From Go 1.8) - if s, ok := 
interface{}(mc).(watcher); ok { - s.startWatcher() - } - // Connect to Server if dial, ok := dials[mc.cfg.Net]; ok { mc.netConn, err = dial(mc.cfg.Addr) @@ -92,6 +87,11 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { } } + // Call startWatcher for context support (From Go 1.8) + if s, ok := interface{}(mc).(watcher); ok { + s.startWatcher() + } + mc.buf = newBuffer(mc.netConn) // Set I/O timeouts diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go index ca1f2bf144..ab2fdfc6a0 100644 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -15,6 +15,7 @@ import ( "fmt" "net" "net/url" + "sort" "strconv" "strings" "time" @@ -257,7 +258,12 @@ func (cfg *Config) FormatDSN() string { // other params if cfg.Params != nil { - for param, value := range cfg.Params { + var params []string + for param := range cfg.Params { + params = append(params, param) + } + sort.Strings(params) + for _, param := range params { if hasParam { buf.WriteByte('&') } else { @@ -267,7 +273,7 @@ func (cfg *Config) FormatDSN() string { buf.WriteString(param) buf.WriteByte('=') - buf.WriteString(url.QueryEscape(value)) + buf.WriteString(url.QueryEscape(cfg.Params[param])) } } diff --git a/vendor/github.com/gocql/gocql/AUTHORS b/vendor/github.com/gocql/gocql/AUTHORS index ec844a83c0..138a5ee645 100644 --- a/vendor/github.com/gocql/gocql/AUTHORS +++ b/vendor/github.com/gocql/gocql/AUTHORS @@ -89,3 +89,5 @@ Derrick Wippler Leigh McCulloch Ron Kuris Raphael Gavache +Yasser Abdolmaleki +Krishnanand Thommandra diff --git a/vendor/github.com/gocql/gocql/conn.go b/vendor/github.com/gocql/gocql/conn.go index fed11db396..4819d9b34d 100644 --- a/vendor/github.com/gocql/gocql/conn.go +++ b/vendor/github.com/gocql/gocql/conn.go @@ -156,10 +156,10 @@ func Connect(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler, ses // TODO(zariel): remove these if host == nil { panic("host is nil") - } else if len(host.Peer()) == 0 { - panic("host missing peer ip address") + } else if len(host.ConnectAddress()) == 0 { + panic(fmt.Sprintf("host missing connect ip address: %v", host)) } else if host.Port() == 0 { - panic("host missing port") + panic(fmt.Sprintf("host missing port: %v", host)) } var ( @@ -172,7 +172,7 @@ func Connect(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler, ses } // TODO(zariel): handle ipv6 zone - translatedPeer, translatedPort := session.cfg.translateAddressPort(host.Peer(), host.Port()) + translatedPeer, translatedPort := session.cfg.translateAddressPort(host.ConnectAddress(), host.Port()) addr := (&net.TCPAddr{IP: translatedPeer, Port: translatedPort}).String() //addr := (&net.TCPAddr{IP: host.Peer(), Port: host.Port()}).String() @@ -845,7 +845,7 @@ func (c *Conn) executeQuery(qry *Query) *Iter { return &Iter{err: err} } - if len(framer.traceID) > 0 { + if len(framer.traceID) > 0 && qry.trace != nil { qry.trace.Trace(framer.traceID) } diff --git a/vendor/github.com/gocql/gocql/connectionpool.go b/vendor/github.com/gocql/gocql/connectionpool.go index 45c57d81da..08313749cb 100644 --- a/vendor/github.com/gocql/gocql/connectionpool.go +++ b/vendor/github.com/gocql/gocql/connectionpool.go @@ -132,7 +132,7 @@ func (p *policyConnPool) SetHosts(hosts []*HostInfo) { // don't create a connection pool for a down host continue } - ip := host.Peer().String() + ip := host.ConnectAddress().String() if _, exists := p.hostConnPools[ip]; exists { // still have this host, so don't remove it 
delete(toRemove, ip) @@ -158,7 +158,7 @@ func (p *policyConnPool) SetHosts(hosts []*HostInfo) { createCount-- if pool.Size() > 0 { // add pool only if there a connections available - p.hostConnPools[string(pool.host.Peer())] = pool + p.hostConnPools[string(pool.host.ConnectAddress())] = pool } } @@ -181,7 +181,7 @@ func (p *policyConnPool) Size() int { } func (p *policyConnPool) getPool(host *HostInfo) (pool *hostConnPool, ok bool) { - ip := host.Peer().String() + ip := host.ConnectAddress().String() p.mu.RLock() pool, ok = p.hostConnPools[ip] p.mu.RUnlock() @@ -200,7 +200,7 @@ func (p *policyConnPool) Close() { } func (p *policyConnPool) addHost(host *HostInfo) { - ip := host.Peer().String() + ip := host.ConnectAddress().String() p.mu.Lock() pool, ok := p.hostConnPools[ip] if !ok { @@ -278,7 +278,7 @@ func newHostConnPool(session *Session, host *HostInfo, port, size int, session: session, host: host, port: port, - addr: (&net.TCPAddr{IP: host.Peer(), Port: host.Port()}).String(), + addr: (&net.TCPAddr{IP: host.ConnectAddress(), Port: host.Port()}).String(), size: size, keyspace: keyspace, conns: make([]*Conn, 0, size), @@ -402,7 +402,7 @@ func (pool *hostConnPool) fill() { // this is call with the connection pool mutex held, this call will // then recursively try to lock it again. FIXME - go pool.session.handleNodeDown(pool.host.Peer(), pool.port) + go pool.session.handleNodeDown(pool.host.ConnectAddress(), pool.port) return } @@ -424,7 +424,7 @@ func (pool *hostConnPool) logConnectErr(err error) { // connection refused // these are typical during a node outage so avoid log spam. if gocqlDebug { - Logger.Printf("unable to dial %q: %v\n", pool.host.Peer(), err) + Logger.Printf("unable to dial %q: %v\n", pool.host.ConnectAddress(), err) } } else if err != nil { // unexpected error diff --git a/vendor/github.com/gocql/gocql/control.go b/vendor/github.com/gocql/gocql/control.go index 46fc8a40c9..1f7424137d 100644 --- a/vendor/github.com/gocql/gocql/control.go +++ b/vendor/github.com/gocql/gocql/control.go @@ -132,7 +132,7 @@ func hostInfo(addr string, defaultPort int) (*HostInfo, error) { } - return &HostInfo{peer: ip, port: port}, nil + return &HostInfo{connectAddress: ip, port: port}, nil } func shuffleHosts(hosts []*HostInfo) []*HostInfo { @@ -161,7 +161,7 @@ func (c *controlConn) shuffleDial(endpoints []*HostInfo) (*Conn, error) { return conn, nil } - Logger.Printf("gocql: unable to dial control conn %v: %v\n", host.Peer(), err) + Logger.Printf("gocql: unable to dial control conn %v: %v\n", host.ConnectAddress(), err) } return nil, err @@ -247,16 +247,27 @@ func (c *controlConn) setupConn(conn *Conn) error { c.conn.Store(conn) + if v, ok := conn.conn.RemoteAddr().(*net.TCPAddr); ok { + c.session.handleNodeUp(copyBytes(v.IP), v.Port, false) + return nil + } + host, portstr, err := net.SplitHostPort(conn.conn.RemoteAddr().String()) if err != nil { return err } + port, err := strconv.Atoi(portstr) if err != nil { return err } - c.session.handleNodeUp(net.ParseIP(host), port, false) + ip := net.ParseIP(host) + if ip == nil { + return fmt.Errorf("invalid remote addr: addr=%v host=%q", conn.conn.RemoteAddr(), host) + } + + c.session.handleNodeUp(ip, port, false) return nil } @@ -314,7 +325,7 @@ func (c *controlConn) reconnect(refreshring bool) { if err != nil { // host is dead // TODO: this is replicated in a few places - c.session.handleNodeDown(host.Peer(), host.Port()) + c.session.handleNodeDown(host.ConnectAddress(), host.Port()) } else { newConn = conn } @@ -420,52 +431,13 @@ func (c 
*controlConn) query(statement string, values ...interface{}) (iter *Iter return } -func (c *controlConn) fetchHostInfo(ip net.IP, port int) (*HostInfo, error) { - // TODO(zariel): we should probably move this into host_source or atleast - // share code with it. - localHost := c.host() - if localHost == nil { - return nil, errors.New("unable to fetch host info, invalid conn host") - } - - isLocal := localHost.Peer().Equal(ip) - - var fn func(*HostInfo) error - - // TODO(zariel): fetch preferred_ip address (is it >3.x only?) - if isLocal { - fn = func(host *HostInfo) error { - iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.local WHERE key='local'") - iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version) - return iter.Close() - } - } else { - fn = func(host *HostInfo) error { - iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.peers WHERE peer=?", ip) - iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version) - return iter.Close() - } - } - - host := &HostInfo{ - port: port, - peer: ip, - } - - if err := fn(host); err != nil { - return nil, err - } - - return host, nil -} - func (c *controlConn) awaitSchemaAgreement() error { return c.withConn(func(conn *Conn) *Iter { return &Iter{err: conn.awaitSchemaAgreement()} }).err } -func (c *controlConn) host() *HostInfo { +func (c *controlConn) GetHostInfo() *HostInfo { conn := c.conn.Load().(*Conn) if conn == nil { return nil diff --git a/vendor/github.com/gocql/gocql/events.go b/vendor/github.com/gocql/gocql/events.go index 75a12b7bd5..78daa76b41 100644 --- a/vendor/github.com/gocql/gocql/events.go +++ b/vendor/github.com/gocql/gocql/events.go @@ -174,23 +174,15 @@ func (s *Session) handleNodeEvent(frames []frame) { } func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) { - var hostInfo *HostInfo - if s.control != nil && !s.cfg.IgnorePeerAddr { - var err error - hostInfo, err = s.control.fetchHostInfo(ip, port) - if err != nil { - Logger.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err) - return - } - } else { - hostInfo = &HostInfo{peer: ip, port: port} + // Get host info and apply any filters to the host + hostInfo, err := s.hostSource.GetHostInfo(ip, port) + if err != nil { + Logger.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err) + return } - if s.cfg.IgnorePeerAddr && hostInfo.Peer().Equal(ip) { - hostInfo.setPeer(ip) - } - - if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(hostInfo) { + // If hostInfo is nil, this host was filtered out by cfg.HostFilter + if hostInfo == nil { return } @@ -216,7 +208,7 @@ func (s *Session) handleRemovedNode(ip net.IP, port int) { // we remove all nodes but only add ones which pass the filter host := s.ring.getHost(ip) if host == nil { - host = &HostInfo{peer: ip, port: port} + host = &HostInfo{connectAddress: ip, port: port} } if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { @@ -240,9 +232,10 @@ func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) { host := s.ring.getHost(ip) if host != nil { - if s.cfg.IgnorePeerAddr && host.Peer().Equal(ip) { - // TODO: how can this ever be true? - host.setPeer(ip) + // If we receive a node up event and user has asked us to ignore the peer address use + // the address provide by the event instead the address provide by peer the table. 
+ if s.cfg.IgnorePeerAddr && !host.ConnectAddress().Equal(ip) { + host.SetConnectAddress(ip) } if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { @@ -269,7 +262,7 @@ func (s *Session) handleNodeDown(ip net.IP, port int) { host := s.ring.getHost(ip) if host == nil { - host = &HostInfo{peer: ip, port: port} + host = &HostInfo{connectAddress: ip, port: port} } if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { diff --git a/vendor/github.com/gocql/gocql/filters.go b/vendor/github.com/gocql/gocql/filters.go index 807a2cf474..32e6ce66cd 100644 --- a/vendor/github.com/gocql/gocql/filters.go +++ b/vendor/github.com/gocql/gocql/filters.go @@ -48,10 +48,10 @@ func WhiteListHostFilter(hosts ...string) HostFilter { m := make(map[string]bool, len(hostInfos)) for _, host := range hostInfos { - m[string(host.peer)] = true + m[host.ConnectAddress().String()] = true } return HostFilterFunc(func(host *HostInfo) bool { - return m[string(host.Peer())] + return m[host.ConnectAddress().String()] }) } diff --git a/vendor/github.com/gocql/gocql/host_source.go b/vendor/github.com/gocql/gocql/host_source.go index 183b0ae9a8..49b8dfd41f 100644 --- a/vendor/github.com/gocql/gocql/host_source.go +++ b/vendor/github.com/gocql/gocql/host_source.go @@ -1,6 +1,7 @@ package gocql import ( + "errors" "fmt" "net" "strconv" @@ -98,15 +99,25 @@ func (c cassVersion) nodeUpDelay() time.Duration { type HostInfo struct { // TODO(zariel): reduce locking maybe, not all values will change, but to ensure // that we are thread safe use a mutex to access all fields. - mu sync.RWMutex - peer net.IP - port int - dataCenter string - rack string - hostId string - version cassVersion - state nodeState - tokens []string + mu sync.RWMutex + peer net.IP + broadcastAddress net.IP + listenAddress net.IP + rpcAddress net.IP + preferredIP net.IP + connectAddress net.IP + port int + dataCenter string + rack string + hostId string + workload string + graph bool + dseVersion string + partitioner string + clusterName string + version cassVersion + state nodeState + tokens []string } func (h *HostInfo) Equal(host *HostInfo) bool { @@ -115,7 +126,7 @@ func (h *HostInfo) Equal(host *HostInfo) bool { host.mu.RLock() defer host.mu.RUnlock() - return h.peer.Equal(host.peer) + return h.ConnectAddress().Equal(host.ConnectAddress()) } func (h *HostInfo) Peer() net.IP { @@ -131,6 +142,63 @@ func (h *HostInfo) setPeer(peer net.IP) *HostInfo { return h } +func (h *HostInfo) invalidConnectAddr() bool { + addr := h.ConnectAddress() + return addr == nil || addr.IsUnspecified() +} + +// Returns the address that should be used to connect to the host. 
+// If you wish to override this, use an AddressTranslator or +// use a HostFilter to SetConnectAddress() +func (h *HostInfo) ConnectAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + + if h.connectAddress == nil { + // Use 'rpc_address' if provided and it's not 0.0.0.0 + if h.rpcAddress != nil && !h.rpcAddress.IsUnspecified() { + return h.rpcAddress + } else if h.broadcastAddress != nil && !h.broadcastAddress.IsUnspecified() { + return h.broadcastAddress + } else if h.peer != nil { + // Peer should always be set if this from 'system.peer' + return h.peer + } + } + return h.connectAddress +} + +func (h *HostInfo) SetConnectAddress(address net.IP) *HostInfo { + h.mu.Lock() + defer h.mu.Unlock() + h.connectAddress = address + return h +} + +func (h *HostInfo) BroadcastAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.broadcastAddress +} + +func (h *HostInfo) ListenAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.listenAddress +} + +func (h *HostInfo) RPCAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.rpcAddress +} + +func (h *HostInfo) PreferredIP() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.preferredIP +} + func (h *HostInfo) DataCenter() string { h.mu.RLock() defer h.mu.RUnlock() @@ -170,6 +238,36 @@ func (h *HostInfo) setHostID(hostID string) *HostInfo { return h } +func (h *HostInfo) WorkLoad() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.workload +} + +func (h *HostInfo) Graph() bool { + h.mu.RLock() + defer h.mu.RUnlock() + return h.graph +} + +func (h *HostInfo) DSEVersion() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.dseVersion +} + +func (h *HostInfo) Partitioner() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.partitioner +} + +func (h *HostInfo) ClusterName() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.clusterName +} + func (h *HostInfo) Version() cassVersion { h.mu.RLock() defer h.mu.RUnlock() @@ -239,25 +337,24 @@ func (h *HostInfo) IsUp() bool { func (h *HostInfo) String() string { h.mu.RLock() defer h.mu.RUnlock() - return fmt.Sprintf("[hostinfo peer=%q port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.peer, h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens)) + return fmt.Sprintf("[HostInfo connectAddress=%q peer=%q rpc_address=%q broadcast_address=%q "+ + "port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", + h.connectAddress, h.peer, h.rpcAddress, h.broadcastAddress, + h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens)) } // Polls system.peers at a specific interval to find new hosts type ringDescriber struct { - dcFilter string - rackFilter string - session *Session - closeChan chan bool - // indicates that we can use system.local to get the connections remote address - localHasRpcAddr bool - + session *Session mu sync.Mutex prevHosts []*HostInfo + localHost *HostInfo prevPartitioner string } -func checkSystemLocal(control *controlConn) (bool, error) { - iter := control.query("SELECT broadcast_address FROM system.local") +// Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces +func checkSystemSchema(control *controlConn) (bool, error) { + iter := control.query("SELECT * FROM system_schema.keyspaces") if err := iter.err; err != nil { if errf, ok := err.(*errorFrame); ok { if errf.code == errSyntax { @@ -271,106 +368,279 @@ func checkSystemLocal(control *controlConn) (bool, error) { return true, nil } -// 
Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces -func checkSystemSchema(control *controlConn) (bool, error) { - iter := control.query("SELECT * FROM system_schema.keyspaces") - if err := iter.err; err != nil { - if errf, ok := err.(*errorFrame); ok { - if errf.code == errReadFailure { - return false, nil +// Given a map that represents a row from either system.local or system.peers +// return as much information as we can in *HostInfo +func (r *ringDescriber) hostInfoFromMap(row map[string]interface{}) (*HostInfo, error) { + const assertErrorMsg = "Assertion failed for %s" + var ok bool + + // Default to our connected port if the cluster doesn't have port information + host := HostInfo{ + port: r.session.cfg.Port, + } + + for key, value := range row { + switch key { + case "data_center": + host.dataCenter, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "data_center") + } + case "rack": + host.rack, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "rack") + } + case "host_id": + hostId, ok := value.(UUID) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "host_id") + } + host.hostId = hostId.String() + case "release_version": + version, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "release_version") + } + host.version.Set(version) + case "peer": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "peer") + } + host.peer = net.ParseIP(ip) + case "cluster_name": + host.clusterName, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "cluster_name") + } + case "partitioner": + host.partitioner, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "partitioner") + } + case "broadcast_address": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "broadcast_address") + } + host.broadcastAddress = net.ParseIP(ip) + case "preferred_ip": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "preferred_ip") + } + host.preferredIP = net.ParseIP(ip) + case "rpc_address": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "rpc_address") + } + host.rpcAddress = net.ParseIP(ip) + case "listen_address": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "listen_address") + } + host.listenAddress = net.ParseIP(ip) + case "workload": + host.workload, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "workload") + } + case "graph": + host.graph, ok = value.(bool) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "graph") + } + case "tokens": + host.tokens, ok = value.([]string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "tokens") + } + case "dse_version": + host.dseVersion, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "dse_version") } } - - return false, err + // TODO(thrawn01): Add 'port'? 
once CASSANDRA-7544 is complete + // Not sure what the port field will be called until the JIRA issue is complete } - return true, nil + return &host, nil } -func (r *ringDescriber) GetHosts() (hosts []*HostInfo, partitioner string, err error) { - r.mu.Lock() - defer r.mu.Unlock() - // we need conn to be the same because we need to query system.peers and system.local - // on the same node to get the whole cluster - - const ( - legacyLocalQuery = "SELECT data_center, rack, host_id, tokens, partitioner, release_version FROM system.local" - // only supported in 2.2.0, 2.1.6, 2.0.16 - localQuery = "SELECT broadcast_address, data_center, rack, host_id, tokens, partitioner, release_version FROM system.local" - ) - - localHost := &HostInfo{} - if r.localHasRpcAddr { - iter := r.session.control.query(localQuery) - if iter == nil { - return r.prevHosts, r.prevPartitioner, nil - } - - iter.Scan(&localHost.peer, &localHost.dataCenter, &localHost.rack, - &localHost.hostId, &localHost.tokens, &partitioner, &localHost.version) - - if err = iter.Close(); err != nil { - return nil, "", err - } - } else { - iter := r.session.control.withConn(func(c *Conn) *Iter { - localHost = c.host - return c.query(legacyLocalQuery) - }) - - if iter == nil { - return r.prevHosts, r.prevPartitioner, nil - } - - iter.Scan(&localHost.dataCenter, &localHost.rack, &localHost.hostId, &localHost.tokens, &partitioner, &localHost.version) - - if err = iter.Close(); err != nil { - return nil, "", err - } +// Ask the control node for it's local host information +func (r *ringDescriber) GetLocalHostInfo() (*HostInfo, error) { + it := r.session.control.query("SELECT * FROM system.local WHERE key='local'") + if it == nil { + return nil, errors.New("Attempted to query 'system.local' on a closed control connection") + } + host, err := r.extractHostInfo(it) + if err != nil { + return nil, err } - localHost.port = r.session.cfg.Port - - hosts = []*HostInfo{localHost} - - rows := r.session.control.query("SELECT rpc_address, data_center, rack, host_id, tokens, release_version FROM system.peers").Scanner() - if rows == nil { - return r.prevHosts, r.prevPartitioner, nil + if host.invalidConnectAddr() { + host.SetConnectAddress(r.session.control.GetHostInfo().ConnectAddress()) } - for rows.Next() { - host := &HostInfo{port: r.session.cfg.Port} - err := rows.Scan(&host.peer, &host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version) + return host, nil +} + +// Given an ip address and port, return a peer that matched the ip address +func (r *ringDescriber) GetPeerHostInfo(ip net.IP, port int) (*HostInfo, error) { + it := r.session.control.query("SELECT * FROM system.peers WHERE peer=?", ip) + if it == nil { + return nil, errors.New("Attempted to query 'system.peers' on a closed control connection") + } + return r.extractHostInfo(it) +} + +func (r *ringDescriber) extractHostInfo(it *Iter) (*HostInfo, error) { + row := make(map[string]interface{}) + + // expect only 1 row + it.MapScan(row) + if err := it.Close(); err != nil { + return nil, err + } + + // extract all available info about the host + return r.hostInfoFromMap(row) +} + +// Ask the control node for host info on all it's known peers +func (r *ringDescriber) GetClusterPeerInfo() ([]*HostInfo, error) { + var hosts []*HostInfo + + // Ask the node for a list of it's peers + it := r.session.control.query("SELECT * FROM system.peers") + if it == nil { + return nil, errors.New("Attempted to query 'system.peers' on a closed connection") + } + + for { + row := 
make(map[string]interface{}) + if !it.MapScan(row) { + break + } + // extract all available info about the peer + host, err := r.hostInfoFromMap(row) if err != nil { - Logger.Println(err) + return nil, err + } + + // If it's not a valid peer + if !r.IsValidPeer(host) { + Logger.Printf("Found invalid peer '%+v' "+ + "Likely due to a gossip or snitch issue, this host will be ignored", host) continue } + hosts = append(hosts, host) + } + if it.err != nil { + return nil, fmt.Errorf("while scanning 'system.peers' table: %s", it.err) + } + return hosts, nil +} - if r.matchFilter(host) { - hosts = append(hosts, host) +// Return true if the host is a valid peer +func (r *ringDescriber) IsValidPeer(host *HostInfo) bool { + return !(len(host.RPCAddress()) == 0 || + host.hostId == "" || + host.dataCenter == "" || + host.rack == "" || + len(host.tokens) == 0) +} + +// Return a list of hosts the cluster knows about +func (r *ringDescriber) GetHosts() ([]*HostInfo, string, error) { + r.mu.Lock() + defer r.mu.Unlock() + + // Update the localHost info with data from the connected host + localHost, err := r.GetLocalHostInfo() + if err != nil { + return r.prevHosts, r.prevPartitioner, err + } else if localHost.invalidConnectAddr() { + panic(fmt.Sprintf("unable to get localhost connect address: %v", localHost)) + } + + // Update our list of hosts by querying the cluster + hosts, err := r.GetClusterPeerInfo() + if err != nil { + return r.prevHosts, r.prevPartitioner, err + } + + hosts = append(hosts, localHost) + + // Filter the hosts if filter is provided + filteredHosts := hosts + if r.session.cfg.HostFilter != nil { + filteredHosts = filteredHosts[:0] + for _, host := range hosts { + if r.session.cfg.HostFilter.Accept(host) { + filteredHosts = append(filteredHosts, host) + } } } - if err = rows.Err(); err != nil { - return nil, "", err - } + r.prevHosts = filteredHosts + r.prevPartitioner = localHost.partitioner + r.localHost = localHost - r.prevHosts = hosts - r.prevPartitioner = partitioner - - return hosts, partitioner, nil + return filteredHosts, localHost.partitioner, nil } -func (r *ringDescriber) matchFilter(host *HostInfo) bool { - if r.dcFilter != "" && r.dcFilter != host.DataCenter() { - return false +// Given an ip/port return HostInfo for the specified ip/port +func (r *ringDescriber) GetHostInfo(ip net.IP, port int) (*HostInfo, error) { + // TODO(thrawn01): Is IgnorePeerAddr still useful now that we have DisableInitialHostLookup? + // TODO(thrawn01): should we also check for DisableInitialHostLookup and return if true? 
+ + // Ignore the port and connect address and use the address/port we already have + if r.session.control == nil || r.session.cfg.IgnorePeerAddr { + return &HostInfo{connectAddress: ip, port: port}, nil } - if r.rackFilter != "" && r.rackFilter != host.Rack() { - return false + // Attempt to get the host info for our control connection + controlHost := r.session.control.GetHostInfo() + if controlHost == nil { + return nil, errors.New("invalid control connection") } - return true + var ( + host *HostInfo + err error + ) + + // If we are asking about the same node our control connection has a connection too + if controlHost.ConnectAddress().Equal(ip) { + host, err = r.GetLocalHostInfo() + } else { + host, err = r.GetPeerHostInfo(ip, port) + } + + // No host was found matching this ip/port + if err != nil { + return nil, err + } + + if controlHost.ConnectAddress().Equal(ip) { + // Always respect the provided control node address and disregard the ip address + // the cassandra node provides. We do this as we are already connected and have a + // known valid ip address. This insulates gocql from client connection issues stemming + // from node misconfiguration. For instance when a node is run from a container, by + // default the node will report its ip address as 127.0.0.1 which is typically invalid. + host.SetConnectAddress(ip) + } + + if host.invalidConnectAddr() { + return nil, fmt.Errorf("host ConnectAddress invalid: %v", host) + } + + return host, nil } func (r *ringDescriber) refreshRing() error { @@ -386,13 +656,11 @@ func (r *ringDescriber) refreshRing() error { // TODO: move this to session // TODO: handle removing hosts here for _, h := range hosts { - if r.session.cfg.HostFilter == nil || r.session.cfg.HostFilter.Accept(h) { - if host, ok := r.session.ring.addHostIfMissing(h); !ok { - r.session.pool.addHost(h) - r.session.policy.AddHost(h) - } else { - host.update(h) - } + if host, ok := r.session.ring.addHostIfMissing(h); !ok { + r.session.pool.addHost(h) + r.session.policy.AddHost(h) + } else { + host.update(h) } } diff --git a/vendor/github.com/gocql/gocql/marshal.go b/vendor/github.com/gocql/gocql/marshal.go index 3ebea49e3d..45feea8fa5 100644 --- a/vendor/github.com/gocql/gocql/marshal.go +++ b/vendor/github.com/gocql/gocql/marshal.go @@ -1595,12 +1595,11 @@ func marshalTuple(info TypeInfo, value interface{}) ([]byte, error) { case unsetColumn: return nil, unmarshalErrorf("Invalid request: UnsetValue is unsupported for tuples") case []interface{}: - var buf []byte - if len(v) != len(tuple.Elems) { return nil, unmarshalErrorf("cannont marshal tuple: wrong number of elements") } + var buf []byte for i, elem := range v { data, err := Marshal(tuple.Elems[i], elem) if err != nil { @@ -1615,7 +1614,51 @@ func marshalTuple(info TypeInfo, value interface{}) ([]byte, error) { return buf, nil } - return nil, unmarshalErrorf("cannot marshal %T into %s", value, tuple) + rv := reflect.ValueOf(value) + t := rv.Type() + k := t.Kind() + + switch k { + case reflect.Struct: + if v := t.NumField(); v != len(tuple.Elems) { + return nil, marshalErrorf("can not marshal tuple into struct %v, not enough fields have %d need %d", t, v, len(tuple.Elems)) + } + + var buf []byte + for i, elem := range tuple.Elems { + data, err := Marshal(elem, rv.Field(i).Interface()) + if err != nil { + return nil, err + } + + n := len(data) + buf = appendInt(buf, int32(n)) + buf = append(buf, data...) 
+ } + + return buf, nil + case reflect.Slice, reflect.Array: + size := rv.Len() + if size != len(tuple.Elems) { + return nil, marshalErrorf("can not marshal tuple into %v of length %d need %d elements", k, size, len(tuple.Elems)) + } + + var buf []byte + for i, elem := range tuple.Elems { + data, err := Marshal(elem, rv.Index(i).Interface()) + if err != nil { + return nil, err + } + + n := len(data) + buf = appendInt(buf, int32(n)) + buf = append(buf, data...) + } + + return buf, nil + } + + return nil, marshalErrorf("cannot marshal %T into %s", value, tuple) } // currently only support unmarshal into a list of values, this makes it possible @@ -1644,6 +1687,61 @@ func unmarshalTuple(info TypeInfo, data []byte, value interface{}) error { return nil } + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + + rv = rv.Elem() + t := rv.Type() + k := t.Kind() + + switch k { + case reflect.Struct: + if v := t.NumField(); v != len(tuple.Elems) { + return unmarshalErrorf("can not unmarshal tuple into struct %v, not enough fields have %d need %d", t, v, len(tuple.Elems)) + } + + for i, elem := range tuple.Elems { + m := readInt(data) + data = data[4:] + + v := elem.New() + if err := Unmarshal(elem, data[:m], v); err != nil { + return err + } + rv.Field(i).Set(reflect.ValueOf(v).Elem()) + + data = data[m:] + } + + return nil + case reflect.Slice, reflect.Array: + if k == reflect.Array { + size := rv.Len() + if size != len(tuple.Elems) { + return unmarshalErrorf("can not unmarshal tuple into array of length %d need %d elements", size, len(tuple.Elems)) + } + } else { + rv.Set(reflect.MakeSlice(t, len(tuple.Elems), len(tuple.Elems))) + } + + for i, elem := range tuple.Elems { + m := readInt(data) + data = data[4:] + + v := elem.New() + if err := Unmarshal(elem, data[:m], v); err != nil { + return err + } + rv.Index(i).Set(reflect.ValueOf(v).Elem()) + + data = data[m:] + } + + return nil + } + return unmarshalErrorf("cannot unmarshal %s into %T", info, value) } diff --git a/vendor/github.com/gocql/gocql/policies.go b/vendor/github.com/gocql/gocql/policies.go index 14d665a02a..0d49f17e3e 100644 --- a/vendor/github.com/gocql/gocql/policies.go +++ b/vendor/github.com/gocql/gocql/policies.go @@ -105,7 +105,7 @@ func (c *cowHostList) remove(ip net.IP) bool { found := false newL := make([]*HostInfo, 0, size) for i := 0; i < len(l); i++ { - if !l[i].Peer().Equal(ip) { + if !l[i].ConnectAddress().Equal(ip) { newL = append(newL, l[i]) } else { found = true @@ -267,7 +267,7 @@ func (r *roundRobinHostPolicy) AddHost(host *HostInfo) { } func (r *roundRobinHostPolicy) RemoveHost(host *HostInfo) { - r.hosts.remove(host.Peer()) + r.hosts.remove(host.ConnectAddress()) } func (r *roundRobinHostPolicy) HostUp(host *HostInfo) { @@ -310,7 +310,7 @@ func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) { } func (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) { - t.hosts.remove(host.Peer()) + t.hosts.remove(host.ConnectAddress()) t.fallback.RemoveHost(host) t.resetTokenRing() @@ -424,7 +424,7 @@ func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) { hostMap := make(map[string]*HostInfo, len(hosts)) for i, host := range hosts { - ip := host.Peer().String() + ip := host.ConnectAddress().String() peers[i] = ip hostMap[ip] = host } @@ -436,7 +436,7 @@ func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) { } func (r *hostPoolHostPolicy) AddHost(host *HostInfo) { - ip := host.Peer().String() + ip := host.ConnectAddress().String() 
r.mu.Lock() defer r.mu.Unlock() @@ -457,7 +457,7 @@ func (r *hostPoolHostPolicy) AddHost(host *HostInfo) { } func (r *hostPoolHostPolicy) RemoveHost(host *HostInfo) { - ip := host.Peer().String() + ip := host.ConnectAddress().String() r.mu.Lock() defer r.mu.Unlock() @@ -469,7 +469,7 @@ func (r *hostPoolHostPolicy) RemoveHost(host *HostInfo) { delete(r.hostMap, ip) hosts := make([]string, 0, len(r.hostMap)) for _, host := range r.hostMap { - hosts = append(hosts, host.Peer().String()) + hosts = append(hosts, host.ConnectAddress().String()) } r.hp.SetHosts(hosts) @@ -523,7 +523,7 @@ func (host selectedHostPoolHost) Info() *HostInfo { } func (host selectedHostPoolHost) Mark(err error) { - ip := host.info.Peer().String() + ip := host.info.ConnectAddress().String() host.policy.mu.RLock() defer host.policy.mu.RUnlock() diff --git a/vendor/github.com/gocql/gocql/ring.go b/vendor/github.com/gocql/gocql/ring.go index 45ace957f7..37e9c436bb 100644 --- a/vendor/github.com/gocql/gocql/ring.go +++ b/vendor/github.com/gocql/gocql/ring.go @@ -1,6 +1,7 @@ package gocql import ( + "fmt" "net" "sync" "sync/atomic" @@ -53,7 +54,10 @@ func (r *ring) allHosts() []*HostInfo { } func (r *ring) addHost(host *HostInfo) bool { - ip := host.Peer().String() + if host.invalidConnectAddr() { + panic(fmt.Sprintf("invalid host: %v", host)) + } + ip := host.ConnectAddress().String() r.mu.Lock() if r.hosts == nil { @@ -79,7 +83,10 @@ func (r *ring) addOrUpdate(host *HostInfo) *HostInfo { } func (r *ring) addHostIfMissing(host *HostInfo) (*HostInfo, bool) { - ip := host.Peer().String() + if host.invalidConnectAddr() { + panic(fmt.Sprintf("invalid host: %v", host)) + } + ip := host.ConnectAddress().String() r.mu.Lock() if r.hosts == nil { @@ -106,7 +113,7 @@ func (r *ring) removeHost(ip net.IP) bool { _, ok := r.hosts[k] if ok { for i, host := range r.hostList { - if host.Peer().Equal(ip) { + if host.ConnectAddress().Equal(ip) { r.hostList = append(r.hostList[:i], r.hostList[i+1:]...) 
break } diff --git a/vendor/github.com/gocql/gocql/session.go b/vendor/github.com/gocql/gocql/session.go index 4ab1e8ed95..27945ffe91 100644 --- a/vendor/github.com/gocql/gocql/session.go +++ b/vendor/github.com/gocql/gocql/session.go @@ -118,8 +118,7 @@ func NewSession(cfg ClusterConfig) (*Session, error) { s.routingKeyInfoCache.lru = lru.New(cfg.MaxRoutingKeyInfo) s.hostSource = &ringDescriber{ - session: s, - closeChan: make(chan bool), + session: s, } if cfg.PoolConfig.HostSelectionPolicy == nil { @@ -181,26 +180,19 @@ func (s *Session) init() error { return err } - // need to setup host source to check for broadcast_address in system.local - localHasRPCAddr, _ := checkSystemLocal(s.control) - s.hostSource.localHasRpcAddr = localHasRPCAddr - if !s.cfg.DisableInitialHostLookup { - // TODO(zariel): we need to get the partitioner from here - var p string - hosts, p, err = s.hostSource.GetHosts() + var partitioner string + hosts, partitioner, err = s.hostSource.GetHosts() if err != nil { return err } - s.policy.SetPartitioner(p) + s.policy.SetPartitioner(partitioner) } } for _, host := range hosts { - if s.cfg.HostFilter == nil || s.cfg.HostFilter.Accept(host) { - host = s.ring.addOrUpdate(host) - s.handleNodeUp(host.Peer(), host.Port(), false) - } + host = s.ring.addOrUpdate(host) + s.handleNodeUp(host.ConnectAddress(), host.Port(), false) } // TODO(zariel): we probably dont need this any more as we verify that we @@ -241,7 +233,7 @@ func (s *Session) reconnectDownedHosts(intv time.Duration) { if gocqlDebug { buf := bytes.NewBufferString("Session.ring:") for _, h := range hosts { - buf.WriteString("[" + h.Peer().String() + ":" + h.State().String() + "]") + buf.WriteString("[" + h.ConnectAddress().String() + ":" + h.State().String() + "]") } Logger.Println(buf.String()) } @@ -250,7 +242,7 @@ func (s *Session) reconnectDownedHosts(intv time.Duration) { if h.IsUp() { continue } - s.handleNodeUp(h.Peer(), h.Port(), true) + s.handleNodeUp(h.ConnectAddress(), h.Port(), true) } case <-s.quit: return @@ -351,10 +343,6 @@ func (s *Session) Close() { s.pool.Close() } - if s.hostSource != nil { - close(s.hostSource.closeChan) - } - if s.control != nil { s.control.close() } diff --git a/vendor/github.com/gocql/gocql/token.go b/vendor/github.com/gocql/gocql/token.go index 1113b9276a..edc37302b1 100644 --- a/vendor/github.com/gocql/gocql/token.go +++ b/vendor/github.com/gocql/gocql/token.go @@ -184,7 +184,7 @@ func (t *tokenRing) String() string { buf.WriteString("]") buf.WriteString(t.tokens[i].String()) buf.WriteString(":") - buf.WriteString(t.hosts[i].Peer().String()) + buf.WriteString(t.hosts[i].ConnectAddress().String()) } buf.WriteString("\n}") return string(buf.Bytes()) diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 0000000000..7be0cc7b62 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,36 @@ +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 0000000000..41c717573e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. 
proto3_proto/proto3.proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 0000000000..5d4cba4b51 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,234 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) 
+ } else if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 0000000000..737f2731d4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,978 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
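DecodeVarint above reads the base-128 varint layout used for int32, int64, uint32, uint64, bool, and enum fields. The standard library's unsigned varints use the same byte layout, so a round trip can be sanity-checked without the proto package; this is an illustration, not code from the patch:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 300 encodes as 0xAC 0x02: low 7 bits first, MSB set on every byte but the last.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02

	x, m := binary.Uvarint(buf[:n])
	fmt.Println(x, m) // 300 2
}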
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
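DecodeZigzag64 and DecodeZigzag32 above undo the zigzag transform used for sint64/sint32 so that small negative numbers stay small on the wire. The transform written out directly, as a standalone check rather than patch code:

package main

import "fmt"

// zigzag64 / unzigzag64 are the standard sint64 transforms; unzigzag64 is the
// same operation as the (x >> 1) ^ sign-extension step in DecodeZigzag64.
func zigzag64(v int64) uint64   { return uint64((v << 1) ^ (v >> 63)) }
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		u := zigzag64(v)
		fmt.Println(v, "->", u, "->", unzigzag64(u)) // maps to 0,1,2,3,4 and back
	}
}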
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { + if isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + continue + } + } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. 
+ return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). 
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. 
+ for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go new file mode 100644 index 0000000000..6fb74de4cc --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -0,0 +1,172 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
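dec_new_map above parses each map entry as a tiny embedded message whose key is field 1 and whose value is field 2. Building one such entry by hand for a map<string,int32> pair {"a": 1} shows the framing; the bytes below are illustrative, not taken from the patch:

package main

import "fmt"

func main() {
	// Each tag byte is (field_number << 3) | wire_type.
	entry := []byte{
		0x0a, 0x01, 'a', // field 1, wire type 2 (length-delimited): key "a"
		0x10, 0x01, // field 2, wire type 0 (varint): value 1
	}
	// On the wire the enclosing map field is itself length-delimited, so this
	// entry would be preceded by the map field's own tag and byte length (5).
	fmt.Printf("entry: % x (len %d)\n", entry, len(entry))
}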
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). +func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. 
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + size := reflect.TypeOf(value).Elem().Size() + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. +func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + var zero field + setCustomType(newBas, zero, custom) + + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 0000000000..93464c91cf --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 0000000000..18e2a5f776 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
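durationProto and durationFromProto above split a time.Duration into whole seconds plus leftover nanoseconds and reassemble it, rejecting values outside the roughly ±10,000-year range of google.protobuf.Duration. The same split in plain time arithmetic, as a sketch rather than patch code:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 90*time.Second + 500*time.Millisecond

	// durationProto-style split: whole seconds, then the remaining nanos.
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	nanos -= secs * 1e9
	fmt.Println(secs, nanos) // 90 500000000

	// durationFromProto-style reassembly round-trips the value.
	back := time.Duration(secs)*time.Second + time.Duration(nanos)
	fmt.Println(back == d) // true
}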
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, 
p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 0000000000..2b30f84626 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. 
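EncodeVarint and EncodeFixed64 above are the two integer layouts the encoder chooses between: a variable-length base-128 form and a fixed eight-byte little-endian form. A standalone comparison using the standard library's equivalents, not code from the patch:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const x uint64 = 1 << 20

	varint := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(varint, x) // same base-128 layout as EncodeVarint

	fixed := make([]byte, 8)
	binary.LittleEndian.PutUint64(fixed, x) // fixed64 is always 8 bytes

	fmt.Printf("varint:  % x (%d bytes)\n", varint[:n], n) // 80 80 40 (3 bytes)
	fmt.Printf("fixed64: % x (8 bytes)\n", fixed)
}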
+func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
+ + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). 
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 0000000000..32111b7f41 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,350 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) + return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. +func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + x := 0 + if v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. +func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// Encode a reference to a message struct. 
+func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). +func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 0000000000..2ed1cf5966 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 0000000000..0dfcb538e8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,693 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. 
+ // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +type extensionRange interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() +var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() +var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extensionRange, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. 
+ if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. 
+func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) + } + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. 
+ e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(pb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. 
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + return nil + } + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 0000000000..ea6478f009 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,294 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
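A minimal usage sketch of the extension accessors added in extensions.go above (HasExtension, GetExtension, SetExtension, ClearExtension). The package "examplepb", its message MyBase, and the descriptor E_MyExt are hypothetical stand-ins for what generated code would normally provide; they are not part of this patch.

	package main

	import (
		"log"

		"github.com/gogo/protobuf/proto"
		examplepb "example.com/examplepb" // hypothetical generated code with an extendable MyBase and E_MyExt (*string)
	)

	func main() {
		msg := &examplepb.MyBase{}

		// SetExtension stores the typed value; encoding happens lazily when the message is marshaled.
		if err := proto.SetExtension(msg, examplepb.E_MyExt, proto.String("hello")); err != nil {
			log.Fatal(err)
		}

		// HasExtension reports presence without forcing a decode.
		if proto.HasExtension(msg, examplepb.E_MyExt) {
			// GetExtension returns an interface{} holding the extension's Go type (*string here).
			v, err := proto.GetExtension(msg, examplepb.E_MyExt)
			if err != nil {
				log.Fatal(err)
			}
			log.Println(*v.(*string))
		}

		// ClearExtension removes the field again.
		proto.ClearExtension(msg, examplepb.E_MyExt)
	}
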
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" +) + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + return SizeOfExtensionMap(m.extensionsWrite()) +} + +func SizeOfExtensionMap(m map[int32]Extension) (n int) { + return extensionsMapSize(m) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + if err := encodeExtensionsMap(m); err != nil { + return 0, err + } + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + n += copy(data[n:], m[int32(k)].enc) + } + return n, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + if m[id].value == nil || m[id].desc == nil { + return m[id].enc, nil + } + if err := encodeExtensionsMap(m); err != nil { + return nil, err + } + return m[id].enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + 
i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil + } + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil +} + +func (this Extension) GoString() string { + if this.enc == nil { + if err := encodeExtension(&this); err != nil { + panic(err) + } + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 0000000000..7580bb45c6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,898 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := 
m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. 
+ */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 0000000000..4b4f7c909e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,42 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
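As a quick illustration of the scalar pointer helpers and enum JSON helpers defined in lib.go above. The Color name/value maps below are hypothetical stand-ins for the maps generated code normally provides; the proto functions used (String, Int32, EnumName, UnmarshalJSONEnum) are the ones shown in this file.

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/proto"
	)

	var (
		colorName  = map[int32]string{0: "RED", 1: "BLUE"}
		colorValue = map[string]int32{"RED": 0, "BLUE": 1}
	)

	func main() {
		// Optional scalar fields in generated structs are pointers; these helpers
		// allocate a value and return its address in one step.
		label := proto.String("hello")
		count := proto.Int32(7)
		fmt.Println(*label, *count)

		// EnumName prints the symbolic name when known, otherwise the number.
		fmt.Println(proto.EnumName(colorName, 1)) // BLUE

		// UnmarshalJSONEnum accepts either the symbolic or the numeric JSON form.
		v, err := proto.UnmarshalJSONEnum(colorValue, []byte(`"BLUE"`), "Color")
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(v) // 1
	}
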
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 0000000000..fd982decd6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. 
+ b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000000..fb512e2e16 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. 
+func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. 
+type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 0000000000..1763a5f227 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000000..6b5567d47c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
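[Editor's note, not part of the vendored patch: pointer_unsafe.go below identifies a field by its byte offset and recovers a typed pointer by adding that offset to the struct's base address. A minimal standalone sketch of that pattern, using made-up names (example, base); it mirrors what helpers like structPointer_StringVal do:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A int32
	B string
}

func main() {
	v := example{A: 7, B: "x"}
	base := unsafe.Pointer(&v)

	// Locate the field by its byte offset, as toField/field do in the library.
	f, _ := reflect.TypeOf(v).FieldByName("B")

	// Base pointer + field offset, reinterpreted as a pointer to the field's type.
	s := (*string)(unsafe.Pointer(uintptr(base) + f.Offset))
	*s = "hello"
	fmt.Println(v.B) // hello
}

End of editor's note.]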
+ +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. 
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. 
+type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 0000000000..f156a29f0e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,128 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
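[Editor's note, not part of the vendored patch: the word32_Set/word64_Set helpers above avoid one heap allocation per optional scalar by handing out pointers into a pooled backing slice on the Buffer and refilling the pool only when it runs dry. A rough standalone sketch of that pooling idea; the type and method names here (buffer, setWord, pool) are illustrative, not the library's API:

package main

import "fmt"

const pool = 8 // assumed pool size for the sketch; the library uses uint32PoolSize

type buffer struct {
	uint32s []uint32
}

// setWord stores x in pooled backing storage and returns a pointer to it,
// so many optional fields share a single allocation of pool words.
func (b *buffer) setWord(x uint32) *uint32 {
	if len(b.uint32s) == 0 {
		b.uint32s = make([]uint32, pool)
	}
	b.uint32s[0] = x
	p := &b.uint32s[0]
	b.uint32s = b.uint32s[1:]
	return p
}

func main() {
	var b buffer
	p, q := b.setWord(1), b.setWord(2)
	fmt.Println(*p, *q) // 1 2
}

End of editor's note.]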
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + + oldHeader := structPointer_GetSliceHeader(base, f) + oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() + newLen := oldHeader.Len + 1 + newSlice := reflect.MakeSlice(typ, newLen, newLen) + reflect.Copy(newSlice, oldSlice) + bas := toStructPointer(newSlice) + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). +type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 0000000000..44b332052e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,968 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. 
Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + StdTime bool + StdDuration bool + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sstype reflect.Type // set for slices of structs types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + } else { + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_ref_bool + } + case reflect.Int32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + } else { + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_int32 + } + case reflect.Uint32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_ref_uint32 + } + case reflect.Int64, reflect.Uint64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.Float32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_uint32 + } + case reflect.Float64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.String: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + } else { + p.enc = (*Buffer).enc_ref_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_ref_string + } + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can 
just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = 
size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. 
+func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) || + reflect.PtrTo(t).Implements(extendableBytesType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. 
+func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
+func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 0000000000..b6b7176c56 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,111 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
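[Editor's note, not part of the vendored patch: the tag strings consumed by (*Properties).Parse in properties.go above come from the protobuf struct tags on generated message types. A small illustrative sketch of reading such a tag and splitting it the way Parse does; the msg type and its Count field are invented for the example:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Hypothetical generated field: wire type "varint", tag number 1, optional.
type msg struct {
	Count *int32 `protobuf:"varint,1,opt,name=count"`
}

func main() {
	f, _ := reflect.TypeOf(msg{}).FieldByName("Count")
	tag := f.Tag.Get("protobuf")

	// Same first step as (*Properties).Parse: wire type, tag number, then options.
	parts := strings.Split(tag, ",")
	fmt.Println(parts[0], parts[1], parts[2:]) // varint 1 [opt name=count]
}

End of editor's note.]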
+ +package proto + +import ( + "fmt" + "os" + "reflect" +) + +func (p *Properties) setCustomEncAndDec(typ reflect.Type) { + p.ctype = typ + if p.Repeated { + p.enc = (*Buffer).enc_custom_slice_bytes + p.dec = (*Buffer).dec_custom_slice_bytes + p.size = size_custom_slice_bytes + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_custom_bytes + p.dec = (*Buffer).dec_custom_bytes + p.size = size_custom_bytes + } else { + p.enc = (*Buffer).enc_custom_ref_bytes + p.dec = (*Buffer).dec_custom_ref_bytes + p.size = size_custom_ref_bytes + } +} + +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + +func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { + t2 := typ.Elem() + p.sstype = typ + p.stype = t2 + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + p.enc = (*Buffer).enc_slice_ref_struct_message + p.dec = (*Buffer).dec_slice_ref_struct_message + p.size = size_slice_ref_struct_message + if p.Wire != "bytes" { + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 0000000000..5a5fd93f7c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 0000000000..d63732fcbd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if pv.Type().Implements(extensionRangeType) { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + props.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), props) + props.StdTime = true + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + props.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), props) + props.StdDuration = true + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. 
+func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. 
+ + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). 
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 0000000000..1d6c6aa0e4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 0000000000..9db12e9601 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1013 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; 
skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. 
+ p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := &timestamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False.
+ switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 0000000000..9324f6542b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. 
+func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 0000000000..d427647436 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,229 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
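(Editorial aside, not part of the vendored patch: the helpers added in timestamp.go above convert a time.Time to and from the internal google.protobuf.Timestamp representation, rejecting values outside [0001-01-01, 10000-01-01). A minimal round-trip sketch, assuming it is compiled inside the proto package because timestampProto and timestampFromProto are unexported; the function name is hypothetical:)

	// exampleTimestampRoundTrip illustrates the conversion helpers defined above.
	func exampleTimestampRoundTrip() {
		now := time.Now().UTC()
		ts, err := timestampProto(now) // -> &timestamp{Seconds: ..., Nanos: ...}
		if err != nil {
			panic(err) // only reachable for times outside the valid range
		}
		back, _ := timestampFromProto(ts)
		fmt.Println(back.Equal(now)) // true; nanosecond precision is preserved
	}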
+ +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} + +func (o *Buffer) decTimestamp() (time.Time, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return time.Time{}, err + } + tproto := &timestamp{} + if err := Unmarshal(b, tproto); err != nil { + return time.Time{}, err + } + return timestampFromProto(tproto) +} + +func (o *Buffer) dec_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setPtrCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) + var zero field + setPtrCustomType(newBas, zero, &t) + return nil +} + +func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) + var zero field + setCustomType(newBas, zero, &t) + return nil +} + +func size_time(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_time(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_time(p *Properties, base structPointer) (n int) { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 2b30f84626..8b84d1b22d 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. 
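// (Editorial note, not part of this hunk: the expression below implements the
// zigzag mapping, which interleaves signed values so small magnitudes encode as
// short varints, e.g. 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4. The arithmetic
// right shift int64(x) >> 63 yields an all-ones mask for negative x and zero
// otherwise, so the XOR flips the shifted bits only for negative inputs.)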
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) } func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 61f83c1e10..5e14513f28 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) + fv.SetUint(x) return nil } case reflect.Uint64: diff --git a/vendor/github.com/google/go-github/github/github-accessors.go b/vendor/github.com/google/go-github/github/github-accessors.go index 0e6d14440f..5368001033 100644 --- a/vendor/github.com/google/go-github/github/github-accessors.go +++ b/vendor/github.com/google/go-github/github/github-accessors.go @@ -3516,6 +3516,14 @@ func (p *Project) GetURL() string { return *p.URL } +// GetColumnID returns the ColumnID field if it's non-nil, zero value otherwise. +func (p *ProjectCard) GetColumnID() int { + if p == nil || p.ColumnID == nil { + return 0 + } + return *p.ColumnID +} + // GetColumnURL returns the ColumnURL field if it's non-nil, zero value otherwise. func (p *ProjectCard) GetColumnURL() string { if p == nil || p.ColumnURL == nil { @@ -3564,6 +3572,14 @@ func (p *ProjectCard) GetUpdatedAt() Timestamp { return *p.UpdatedAt } +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (p *ProjectCard) GetURL() string { + if p == nil || p.URL == nil { + return "" + } + return *p.URL +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (p *ProjectCardEvent) GetAction() string { if p == nil || p.Action == nil { @@ -4156,6 +4172,14 @@ func (p *PullRequestReviewRequest) GetEvent() string { return *p.Event } +// GetDismissStaleReviews returns the DismissStaleReviews field if it's non-nil, zero value otherwise. +func (p *PullRequestReviewsEnforcementUpdate) GetDismissStaleReviews() bool { + if p == nil || p.DismissStaleReviews == nil { + return false + } + return *p.DismissStaleReviews +} + // GetBase returns the Base field if it's non-nil, zero value otherwise. func (p *pullRequestUpdate) GetBase() string { if p == nil || p.Base == nil { diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go index 1fc22976d2..5f066cf95a 100644 --- a/vendor/github.com/google/go-github/github/github.go +++ b/vendor/github.com/google/go-github/github/github.go @@ -315,6 +315,7 @@ type Response struct { } // newResponse creates a new Response for the provided http.Response. +// r must not be nil. 
func newResponse(r *http.Response) *Response { response := &Response{Response: r} response.populatePageValues() @@ -530,9 +531,9 @@ type RateLimitError struct { } func (r *RateLimitError) Error() string { - return fmt.Sprintf("%v %v: %d %v; rate reset in %v", + return fmt.Sprintf("%v %v: %d %v %v", r.Response.Request.Method, sanitizeURL(r.Response.Request.URL), - r.Response.StatusCode, r.Message, r.Rate.Reset.Time.Sub(time.Now())) + r.Response.StatusCode, r.Message, formatRateReset(r.Rate.Reset.Time.Sub(time.Now()))) } // AcceptedError occurs when GitHub returns 202 Accepted response with an @@ -880,6 +881,31 @@ func cloneRequest(r *http.Request) *http.Request { return r2 } +// formatRateReset formats d to look like "[rate reset in 2s]" or +// "[rate reset in 87m02s]" for the positive durations. And like "[rate limit was reset 87m02s ago]" +// for the negative cases. +func formatRateReset(d time.Duration) string { + isNegative := d < 0 + if isNegative { + d *= -1 + } + secondsTotal := int(0.5 + d.Seconds()) + minutes := secondsTotal / 60 + seconds := secondsTotal - minutes*60 + + var timeString string + if minutes > 0 { + timeString = fmt.Sprintf("%dm%02ds", minutes, seconds) + } else { + timeString = fmt.Sprintf("%ds", seconds) + } + + if isNegative { + return fmt.Sprintf("[rate limit was reset %v ago]", timeString) + } + return fmt.Sprintf("[rate reset in %v]", timeString) +} + // Bool is a helper routine that allocates a new bool value // to store v and returns a pointer to it. func Bool(v bool) *bool { return &v } diff --git a/vendor/github.com/google/go-github/github/orgs.go b/vendor/github.com/google/go-github/github/orgs.go index 8b126f00f1..e6947c96cf 100644 --- a/vendor/github.com/google/go-github/github/orgs.go +++ b/vendor/github.com/google/go-github/github/orgs.go @@ -154,6 +154,25 @@ func (s *OrganizationsService) Get(ctx context.Context, org string) (*Organizati return organization, resp, nil } +// GetByID fetches an organization. +// +// Note: GetByID uses the undocumented GitHub API endpoint /organizations/:id. +func (s *OrganizationsService) GetByID(ctx context.Context, id int) (*Organization, *Response, error) { + u := fmt.Sprintf("organizations/%d", id) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + organization := new(Organization) + resp, err := s.client.Do(ctx, req, organization) + if err != nil { + return nil, resp, err + } + + return organization, resp, nil +} + // Edit an organization. // // GitHub API docs: https://developer.github.com/v3/orgs/#edit-an-organization diff --git a/vendor/github.com/google/go-github/github/projects.go b/vendor/github.com/google/go-github/github/projects.go index 58b638eb8c..11c885f99d 100644 --- a/vendor/github.com/google/go-github/github/projects.go +++ b/vendor/github.com/google/go-github/github/projects.go @@ -259,14 +259,19 @@ func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int, o // ProjectCard represents a card in a column of a GitHub Project. 
// -// GitHub API docs: https://developer.github.com/v3/repos/projects/ +// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card type ProjectCard struct { + URL *string `json:"url,omitempty"` ColumnURL *string `json:"column_url,omitempty"` ContentURL *string `json:"content_url,omitempty"` ID *int `json:"id,omitempty"` Note *string `json:"note,omitempty"` + Creator *User `json:"creator,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` UpdatedAt *Timestamp `json:"updated_at,omitempty"` + + // The following fields are only populated by Webhook events. + ColumnID *int `json:"column_id,omitempty"` } // ListProjectCards lists the cards in a column of a GitHub Project. diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go index c3629daa5c..38bbc15d5f 100644 --- a/vendor/github.com/google/go-github/github/repos.go +++ b/vendor/github.com/google/go-github/github/repos.go @@ -7,6 +7,7 @@ package github import ( "context" + "encoding/json" "fmt" "strings" ) @@ -532,25 +533,22 @@ type Branch struct { // Protection represents a repository branch's protection. type Protection struct { - RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` - RequiredPullRequestReviews *RequiredPullRequestReviews `json:"required_pull_request_reviews"` - EnforceAdmins *AdminEnforcement `json:"enforce_admins"` - Restrictions *BranchRestrictions `json:"restrictions"` + RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` + RequiredPullRequestReviews *PullRequestReviewsEnforcement `json:"required_pull_request_reviews"` + EnforceAdmins *AdminEnforcement `json:"enforce_admins"` + Restrictions *BranchRestrictions `json:"restrictions"` } // ProtectionRequest represents a request to create/edit a branch's protection. type ProtectionRequest struct { - RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` - RequiredPullRequestReviews *RequiredPullRequestReviews `json:"required_pull_request_reviews"` - EnforceAdmins bool `json:"enforce_admins"` - Restrictions *BranchRestrictionsRequest `json:"restrictions"` + RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` + RequiredPullRequestReviews *PullRequestReviewsEnforcementRequest `json:"required_pull_request_reviews"` + EnforceAdmins bool `json:"enforce_admins"` + Restrictions *BranchRestrictionsRequest `json:"restrictions"` } // RequiredStatusChecks represents the protection status of an individual branch. type RequiredStatusChecks struct { - // Enforce required status checks for repository administrators. (Required.) - // Deprecated: Use EnforceAdmins instead. - IncludeAdmins bool `json:"include_admins"` // Require branches to be up to date before merging. (Required.) Strict bool `json:"strict"` // The list of status checks to require in order to merge into this @@ -558,11 +556,55 @@ type RequiredStatusChecks struct { Contexts []string `json:"contexts"` } -// RequiredPullRequestReviews represents the protection configuration for pull requests. -type RequiredPullRequestReviews struct { - // Enforce pull request reviews for repository administrators. (Required.) - // Deprecated: Use EnforceAdmins instead. - IncludeAdmins bool `json:"include_admins"` +// PullRequestReviewsEnforcement represents the pull request reviews enforcement of a protected branch. +type PullRequestReviewsEnforcement struct { + // Specifies which users and teams can dismiss pull request reviews.
+ DismissalRestrictions DismissalRestrictions `json:"dismissal_restrictions"` + // Specifies if approved reviews are dismissed automatically, when a new commit is pushed. + DismissStaleReviews bool `json:"dismiss_stale_reviews"` +} + +// PullRequestReviewsEnforcementRequest represents a request to set the pull request review +// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcement above +// because the request structure is different from the response structure. +type PullRequestReviewsEnforcementRequest struct { + // Specifies which users and teams should be allowed to dismiss pull request reviews. Can be nil to disable the restrictions. + DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions"` + // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. (Required) + DismissStaleReviews bool `json:"dismiss_stale_reviews"` +} + +// MarshalJSON implements the json.Marshaler interface. +// It converts a nil PullRequestReviewsEnforcementRequest.DismissalRestrictionsRequest to an empty array. +func (req PullRequestReviewsEnforcementRequest) MarshalJSON() ([]byte, error) { + if req.DismissalRestrictionsRequest == nil { + newReq := struct { + R []interface{} `json:"dismissal_restrictions"` + D bool `json:"dismiss_stale_reviews"` + }{ + R: []interface{}{}, + D: req.DismissStaleReviews, + } + return json.Marshal(newReq) + } + newReq := struct { + R *DismissalRestrictionsRequest `json:"dismissal_restrictions"` + D bool `json:"dismiss_stale_reviews"` + }{ + R: req.DismissalRestrictionsRequest, + D: req.DismissStaleReviews, + } + return json.Marshal(newReq) +} + +// PullRequestReviewsEnforcementUpdate represents a request to patch the pull request review +// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcementRequest above +// because the patch request does not require all fields to be initialized. +type PullRequestReviewsEnforcementUpdate struct { + // Specifies which users and teams can dismiss pull request reviews. Can be omitted. + DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` + // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. Can be omitted. + DismissStaleReviews *bool `json:"dismiss_stale_reviews,omitempty"` } // AdminEnforcement represents the configuration to enforce required status checks for repository administrators. @@ -591,6 +633,25 @@ type BranchRestrictionsRequest struct { Teams []string `json:"teams"` } +// DismissalRestrictions specifies which users and teams can dismiss pull request reviews. +type DismissalRestrictions struct { + // The list of users who can dismiss pull request reviews. + Users []*User `json:"users"` + // The list of teams which can dismiss pull request reviews. + Teams []*Team `json:"teams"` +} + +// DismissalRestrictionsRequest represents the request to create/edit the +// restriction to allow only specific users or teams to dismiss pull request reviews. It is +// separate from DismissalRestrictions above because the request structure is +// different from the response structure. +type DismissalRestrictionsRequest struct { + // The list of user logins who can dismiss pull request reviews. (Required; use []string{} instead of nil for empty list.) + Users []string `json:"users"` + // The list of team slugs which can dismiss pull request reviews. (Required; use []string{} instead of nil for empty list.)
+ Teams []string `json:"teams"` +} + // ListBranches lists branches for the specified repository. // // GitHub API docs: https://developer.github.com/v3/repos/#list-branches @@ -761,3 +822,153 @@ func (s *RepositoriesService) License(ctx context.Context, owner, repo string) ( return r, resp, nil } + +// GetPullRequestReviewEnforcement gets pull request review enforcement of a protected branch. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-pull-request-review-enforcement-of-protected-branch +func (s *RepositoriesService) GetPullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + r := new(PullRequestReviewsEnforcement) + resp, err := s.client.Do(ctx, req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, nil +} + +// UpdatePullRequestReviewEnforcement patches pull request review enforcement of a protected branch. +// It requires admin access and branch protection to be enabled. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-pull-request-review-enforcement-of-protected-branch +func (s *RepositoriesService) UpdatePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string, patch *PullRequestReviewsEnforcementUpdate) (*PullRequestReviewsEnforcement, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) + req, err := s.client.NewRequest("PATCH", u, patch) + if err != nil { + return nil, nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + r := new(PullRequestReviewsEnforcement) + resp, err := s.client.Do(ctx, req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, err +} + +// DisableDismissalRestrictions disables dismissal restrictions of a protected branch. +// It requires admin access and branch protection to be enabled. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-pull-request-review-enforcement-of-protected-branch +func (s *RepositoriesService) DisableDismissalRestrictions(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) + + data := struct { + R []interface{} `json:"dismissal_restrictions"` + }{[]interface{}{}} + + req, err := s.client.NewRequest("PATCH", u, data) + if err != nil { + return nil, nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + r := new(PullRequestReviewsEnforcement) + resp, err := s.client.Do(ctx, req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, err +} + +// RemovePullRequestReviewEnforcement removes pull request enforcement of a protected branch. 
+// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-pull-request-review-enforcement-of-protected-branch +func (s *RepositoriesService) RemovePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + return s.client.Do(ctx, req, nil) +} + +// GetAdminEnforcement gets admin enforcement information of a protected branch. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-admin-enforcement-of-protected-branch +func (s *RepositoriesService) GetAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + r := new(AdminEnforcement) + resp, err := s.client.Do(ctx, req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, nil +} + +// AddAdminEnforcement adds admin enforcement to a protected branch. +// It requires admin access and branch protection to be enabled. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#add-admin-enforcement-of-protected-branch +func (s *RepositoriesService) AddAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + r := new(AdminEnforcement) + resp, err := s.client.Do(ctx, req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, err +} + +// RemoveAdminEnforcement removes admin enforcement from a protected branch. +// +// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-admin-enforcement-of-protected-branch +func (s *RepositoriesService) RemoveAdminEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + // TODO: remove custom Accept header when this API fully launches + req.Header.Set("Accept", mediaTypeProtectedBranchesPreview) + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/github/repos_collaborators.go b/vendor/github.com/google/go-github/github/repos_collaborators.go index 76e8a1f0a1..61b63318f4 100644 --- a/vendor/github.com/google/go-github/github/repos_collaborators.go +++ b/vendor/github.com/google/go-github/github/repos_collaborators.go @@ -91,7 +91,8 @@ type RepositoryAddCollaboratorOptions struct { Permission string `json:"permission,omitempty"` } -// AddCollaborator adds the specified GitHub user as collaborator to the given repo. 
+// AddCollaborator sends an invitation to the specified GitHub user +// to become a collaborator to the given repo. // // GitHub API docs: https://developer.github.com/v3/repos/collaborators/#add-user-as-a-collaborator func (s *RepositoriesService) AddCollaborator(ctx context.Context, owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) { diff --git a/vendor/github.com/google/go-github/github/users_gpg_keys.go b/vendor/github.com/google/go-github/github/users_gpg_keys.go index be88c042ab..3e95fb4460 100644 --- a/vendor/github.com/google/go-github/github/users_gpg_keys.go +++ b/vendor/github.com/google/go-github/github/users_gpg_keys.go @@ -43,7 +43,7 @@ type GPGEmail struct { // ListGPGKeys lists the public GPG keys for a user. Passing the empty // string will fetch keys for the authenticated user. It requires authentication // via Basic Auth or via OAuth with at least read:gpg_key scope. - +// // GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#list-gpg-keys-for-a-user func (s *UsersService) ListGPGKeys(ctx context.Context, user string, opt *ListOptions) ([]*GPGKey, *Response, error) { var u string diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 34cfaa7ee6..605592db97 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -442,7 +442,7 @@ func (a *Agent) DisableNodeMaintenance() error { // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop the // log stream -func (a *Agent) Monitor(loglevel string, stopCh chan struct{}, q *QueryOptions) (chan string, error) { +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { r := a.c.newRequest("GET", "/v1/agent/monitor") r.setQueryOptions(q) if loglevel != "" { diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 1c9f1e2b71..0a62b4f68d 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -105,6 +105,26 @@ type QueryOptions struct { // relayed back to the sender through N other random nodes. Must be // a value from 0 to 5 (inclusive). RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *QueryOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { + o2 := new(QueryOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 } // WriteOptions are used to parameterize a write @@ -121,6 +141,26 @@ type WriteOptions struct { // relayed back to the sender through N other random nodes. Must be // a value from 0 to 5 (inclusive). RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. 
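	// An illustrative sketch (editorial note, not part of this patch) of how the
	// context plumbing added here is meant to be used; the variable names are
	// hypothetical:
	//
	//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	//	defer cancel()
	//	wo := (&WriteOptions{}).WithContext(ctx)
	//	// any write call that receives wo now honors ctx's deadline/cancellation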
+ ctx context.Context +} + +func (o *WriteOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { + o2 := new(WriteOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 } // QueryMeta is used to return meta data about a query @@ -457,6 +497,7 @@ type request struct { body io.Reader header http.Header obj interface{} + ctx context.Context } // setQueryOptions is used to annotate the request with @@ -494,6 +535,7 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.RelayFactor != 0 { r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) } + r.ctx = q.ctx } // durToMsec converts a duration to a millisecond specified string. If the @@ -538,6 +580,7 @@ func (r *request) setWriteOptions(q *WriteOptions) { if q.RelayFactor != 0 { r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) } + r.ctx = q.ctx } // toHTTP converts the request to an HTTP request @@ -569,8 +612,11 @@ func (r *request) toHTTP() (*http.Request, error) { if r.config.HttpAuth != nil { req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) } - - return req, nil + if r.ctx != nil { + return req.WithContext(r.ctx), nil + } else { + return req, nil + } } // newRequest is used to create a new request diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go index 36e99a389e..1613f11a60 100644 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -145,7 +145,9 @@ func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, // RenewPeriodic is used to periodically invoke Session.Renew on a // session until a doneCh is closed. This is meant to be used in a long running // goroutine to ensure a session stays valid. -func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error { +func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { + ctx := q.Context() + ttl, err := time.ParseDuration(initialTTL) if err != nil { return err @@ -179,6 +181,11 @@ func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, d // Attempt a session destroy s.Destroy(id, q) return nil + + case <-ctx.Done(): + // Bail immediately since attempting the destroy would + // use the canceled context in q, which would just bail. + return ctx.Err() } } } diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE deleted file mode 100644 index ccae99f6a9..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2012, 2013 Ugorji Nwoke. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the author nor the names of its contributors may be used - to endorse or promote products derived from this software - without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go deleted file mode 100644 index c14d810a73..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. 
- - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -Representative Benchmark Results - -Run the benchmark suite using: - go test -bi -bench=. -benchmem - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext_dep_test.go - -*/ -package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/vendor/github.com/hashicorp/go-msgpack/codec/README.md deleted file mode 100644 index 6c95d1bfd2..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/README.md +++ /dev/null @@ -1,174 +0,0 @@ -# Codec - -High Performance and Feature-Rich Idiomatic Go Library providing -encode/decode support for different serialization formats. 
- -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -Online documentation: [http://godoc.org/github.com/ugorji/go/codec] - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. 
for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -## Representative Benchmark Results - -A sample run of benchmark using "go test -bi -bench=. -benchmem": - - /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) - - .............................................. - BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT - To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." - Benchmark: - Struct recursive Depth: 1 - ApproxDeepSize Of benchmark Struct: 4694 bytes - Benchmark One-Pass Run: - v-msgpack: len: 1600 bytes - bson: len: 3025 bytes - msgpack: len: 1560 bytes - binc: len: 1187 bytes - gob: len: 1972 bytes - json: len: 2538 bytes - .............................................. - PASS - Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op - Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op - Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op - Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op - Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op - Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op - Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op - Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op - Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op - Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op - Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op - Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op - Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op - Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op - ok ugorji.net/codec 30.827s - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext\_dep\_test.go - diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go deleted file mode 100644 index 2bb5e8fee8..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go +++ /dev/null @@ -1,786 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "math" - // "reflect" - // "sync/atomic" - "time" - //"fmt" -) - -const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
- -//var _ = fmt.Printf - -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - bincVdUnicodeOther - bincVdSymbol - - bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - bincFlBin16 byte = iota - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -type bincEncDriver struct { - w encWriter - m map[string]uint16 // symbols - s uint32 // symbols sequencer - b [8]byte -} - -func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - bs := encodeTime(v.(time.Time)) - e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.w.writeb(bs) - } -} - -func (e *bincEncDriver) encodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(bincVdSpecial<<4 | bincSpTrue) - } else { - e.w.writen1(bincVdSpecial<<4 | bincSpFalse) - } -} - -func (e *bincEncDriver) encodeFloat32(f float32) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - e.w.writen1(bincVdFloat<<4 | bincFlBin32) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *bincEncDriver) encodeFloat64(f float64) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - bigen.PutUint64(e.b[:], math.Float64bits(f)) - if bincDoPrune { - i := 7 - for ; i >= 0 && (e.b[i] == 0); i-- { - } - i++ - if i <= 6 { - e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.w.writen1(byte(i)) - e.w.writeb(e.b[:i]) - return - } - } - e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:]) -} - -func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { - if lim == 4 { - bigen.PutUint32(e.b[:lim], uint32(v)) - } else { - bigen.PutUint64(e.b[:lim], v) - } - if bincDoPrune { - i := pruneSignExt(e.b[:lim], pos) - e.w.writen1(bd | lim - 1 - byte(i)) - e.w.writeb(e.b[i:lim]) - } else { - e.w.writen1(bd | lim - 1) - e.w.writeb(e.b[:lim]) - } -} - -func (e *bincEncDriver) encodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - switch { - case v >= 0: - e.encUint(bincVdPosInt<<4, true, uint64(v)) - case v == -1: - e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - default: - e.encUint(bincVdNegInt<<4, false, uint64(-v)) - } -} - -func (e *bincEncDriver) encodeUint(v uint64) { - e.encUint(bincVdPosInt<<4, true, v) -} - -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - switch { - case v == 0: - e.w.writen1(bincVdSpecial<<4 | bincSpZero) - case pos && v >= 1 && v <= 16: - e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - case v <= math.MaxUint8: - e.w.writen2(bd|0x0, byte(v)) - case v <= math.MaxUint16: - e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.encIntegerPrune(bd, pos, v, 4) - default: - e.encIntegerPrune(bd, pos, v, 8) - } -} - -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(bincVdCustomExt<<4, uint64(length)) - e.w.writen1(xtag) -} - -func (e *bincEncDriver) encodeArrayPreamble(length int) { - e.encLen(bincVdArray<<4, uint64(length)) -} - -func (e *bincEncDriver) encodeMapPreamble(length int) { - 
e.encLen(bincVdMap<<4, uint64(length)) -} - -func (e *bincEncDriver) encodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeSymbol(v string) { - // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) - // return - // } - - //symbols only offer benefit when string length > 1. - //This is because strings with length 1 take only 2 bytes to store - //(bd with embedded length, and single byte for string val). - - l := len(v) - switch l { - case 0: - e.encBytesLen(c_UTF8, 0) - return - case 1: - e.encBytesLen(c_UTF8, 1) - e.w.writen1(v[0]) - return - } - if e.m == nil { - e.m = make(map[string]uint16, 16) - } - ui, ok := e.m[v] - if ok { - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8) - e.w.writeUint16(ui) - } - } else { - e.s++ - ui = uint16(e.s) - //ui = uint16(atomic.AddUint32(&e.s, 1)) - e.m[v] = ui - var lenprec uint8 - switch { - case l <= math.MaxUint8: - // lenprec = 0 - case l <= math.MaxUint16: - lenprec = 1 - case int64(l) <= math.MaxUint32: - lenprec = 2 - default: - lenprec = 3 - } - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - e.w.writeUint16(ui) - } - switch lenprec { - case 0: - e.w.writen1(byte(l)) - case 1: - e.w.writeUint16(uint16(l)) - case 2: - e.w.writeUint32(uint32(l)) - default: - e.w.writeUint64(uint64(l)) - } - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { - //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { - e.encLen(bincVdByteArray<<4, length) - } else { - e.encLen(bincVdString<<4, length) - } -} - -func (e *bincEncDriver) encLen(bd byte, l uint64) { - if l < 12 { - e.w.writen1(bd | uint8(l+4)) - } else { - e.encLenNumber(bd, l) - } -} - -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - switch { - case v <= math.MaxUint8: - e.w.writen2(bd, byte(v)) - case v <= math.MaxUint16: - e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.w.writen1(bd | 0x02) - e.w.writeUint32(uint32(v)) - default: - e.w.writen1(bd | 0x03) - e.w.writeUint64(uint64(v)) - } -} - -//------------------------------------ - -type bincDecDriver struct { - r decReader - bdRead bool - bdType valueType - bd byte - vd byte - vs byte - b [8]byte - m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) -} - -func (d *bincDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.vd = d.bd >> 4 - d.vs = d.bd & 0x0f - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *bincDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - d.bdType = valueTypeNil - case bincSpFalse, bincSpTrue: - d.bdType = valueTypeBool - case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: - d.bdType = valueTypeFloat - case bincSpZero: - d.bdType = valueTypeUint - case bincSpNegOne: - d.bdType = valueTypeInt - default: - decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - d.bdType = valueTypeUint - case bincVdPosInt: - d.bdType = valueTypeUint - case bincVdNegInt: - d.bdType = 
valueTypeInt - case bincVdFloat: - d.bdType = valueTypeFloat - case bincVdString: - d.bdType = valueTypeString - case bincVdSymbol: - d.bdType = valueTypeSymbol - case bincVdByteArray: - d.bdType = valueTypeBytes - case bincVdTimestamp: - d.bdType = valueTypeTimestamp - case bincVdCustomExt: - d.bdType = valueTypeExt - case bincVdArray: - d.bdType = valueTypeArray - case bincVdMap: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) - } - } - return d.bdType -} - -func (d *bincDecDriver) tryDecodeAsNil() bool { - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return true - } - return false -} - -func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - if d.vd != bincVdTimestamp { - decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) - } - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt - d.bdRead = false - } -} - -func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { - if vs&0x8 == 0 { - d.r.readb(d.b[0:defaultLen]) - } else { - l := d.r.readn1() - if l > 8 { - decErr("At most 8 bytes used to represent float. Received: %v bytes", l) - } - for i := l; i < 8; i++ { - d.b[i] = 0 - } - d.r.readb(d.b[0:l]) - } -} - -func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(d.r.readUint64()); break; } - switch vs := d.vs; vs & 0x7 { - case bincFlBin32: - d.decFloatPre(vs, 4) - f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - case bincFlBin64: - d.decFloatPre(vs, 8) - f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - default: - decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) - } - return -} - -func (d *bincDecDriver) decUint() (v uint64) { - // need to inline the code (interface conversion and type assertion expensive) - switch d.vs { - case 0: - v = uint64(d.r.readn1()) - case 1: - d.r.readb(d.b[6:]) - v = uint64(bigen.Uint16(d.b[6:])) - case 2: - d.b[4] = 0 - d.r.readb(d.b[5:]) - v = uint64(bigen.Uint32(d.b[4:])) - case 3: - d.r.readb(d.b[4:]) - v = uint64(bigen.Uint32(d.b[4:])) - case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:]) - for i := 0; i < lim; i++ { - d.b[i] = 0 - } - v = uint64(bigen.Uint64(d.b[:])) - case 7: - d.r.readb(d.b[:]) - v = uint64(bigen.Uint64(d.b[:])) - default: - decErr("unsigned integers with greater than 64 bits of precision not supported") - } - return -} - -func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.vd { - case bincVdPosInt: - ui = d.decUint() - i = int64(ui) - case bincVdNegInt: - ui = d.decUint() - i = -(int64(ui)) - neg = true - case bincVdSmallInt: - i = int64(d.vs) + 1 - ui = uint64(d.vs) + 1 - case bincVdSpecial: - switch d.vs { - case bincSpZero: - //i = 0 - case bincSpNegOne: - neg = true - ui = 1 - i = -1 - default: - decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) - } - default: - decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - } - return -} - -func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() - if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - checkOverflow(ui, 0, bitsize) - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.vd { - case bincVdSpecial: - d.bdRead = false - switch d.vs { - case bincSpNan: - return math.NaN() - case bincSpPosInf: - return math.Inf(1) - case bincSpZeroFloat, bincSpZero: - return - case bincSpNegInf: - return math.Inf(-1) - default: - decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) - } - case bincVdFloat: - f = d.decFloat() - default: - _, i, _ := d.decIntAny() - f = float64(i) - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *bincDecDriver) decodeBool() (b bool) { - switch d.bd { - case (bincVdSpecial | bincSpFalse): - // b = false - case (bincVdSpecial | bincSpTrue): - b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) readMapLen() (length int) { - if d.vd != bincVdMap { - decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) readArrayLen() (length int) { - if d.vd != bincVdArray { - decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) decLen() int { - if d.vs <= 3 { - return int(d.decUint()) - } - return int(d.vs - 4) -} - -func (d *bincDecDriver) decodeString() (s string) { - switch d.vd { - case bincVdString, bincVdByteArray: - if length := d.decLen(); length > 0 { - s = string(d.r.readn(length)) - } - case bincVdSymbol: - //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, - //extract symbol - //if containsStringVal, read it and put in map - //else look in map for string value - var symbol uint32 - vs := d.vs - //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) - if vs&0x8 == 0 { - symbol = uint32(d.r.readn1()) - } else { - symbol = uint32(d.r.readUint16()) - } - if d.m == nil { - d.m = make(map[uint32]string, 16) - } - - if vs&0x4 == 0 { - s = d.m[symbol] - } else { - var slen int - switch vs & 0x3 { - case 0: - slen = int(d.r.readn1()) - case 1: - slen = int(d.r.readUint16()) - case 2: - slen = int(d.r.readUint32()) - case 3: - slen = int(d.r.readUint64()) - } - s = string(d.r.readn(slen)) - d.m[symbol] = s - } - default: - decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - var clen int - switch d.vd { - case bincVdString, bincVdByteArray: - clen = d.decLen() - default: - decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.vd { - case bincVdCustomExt: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(l) - case bincVdByteArray: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - vt = valueTypeNil - case bincSpFalse: - vt = valueTypeBool - v = false - case bincSpTrue: - vt = valueTypeBool - v = true - case bincSpNan: - vt = valueTypeFloat - v = math.NaN() - case bincSpPosInf: - vt = valueTypeFloat - v = math.Inf(1) - case bincSpNegInf: - vt = valueTypeFloat - v = math.Inf(-1) - case bincSpZeroFloat: - vt = valueTypeFloat - v = float64(0) - case bincSpZero: - vt = valueTypeUint - v = int64(0) // int8(0) - case bincSpNegOne: - vt = valueTypeInt - v = int64(-1) // int8(-1) - default: - decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - vt = valueTypeUint - v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 - case bincVdPosInt: - vt = valueTypeUint - v = d.decUint() - case bincVdNegInt: - vt = valueTypeInt - v = -(int64(d.decUint())) - case bincVdFloat: - vt = valueTypeFloat - v = d.decFloat() - case bincVdSymbol: - vt = valueTypeSymbol - v = d.decodeString() - case bincVdString: - vt = valueTypeString - v = d.decodeString() - case bincVdByteArray: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) - case bincVdTimestamp: - vt = valueTypeTimestamp - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - v = tt - case bincVdCustomExt: - vt = valueTypeExt - l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case bincVdArray: - vt = valueTypeArray - decodeFurther = true - case bincVdMap: - vt = valueTypeMap - decodeFurther = true - default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -//BincHandle is a Handle for the Binc Schema-Free Encoding Format -//defined at https://github.com/ugorji/binc . -// -//BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. -// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. -//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
-type BincHandle struct { - BasicHandle -} - -func (h *BincHandle) newEncDriver(w encWriter) encDriver { - return &bincEncDriver{w: w} -} - -func (h *BincHandle) newDecDriver(r decReader) decDriver { - return &bincDecDriver{r: r} -} - -func (_ *BincHandle) writeExt() bool { - return true -} - -func (h *BincHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go deleted file mode 100644 index 87bef2b935..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go +++ /dev/null @@ -1,1048 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "io" - "reflect" - // "runtime/debug" -) - -// Some tagging information for error messages. -const ( - msgTagDec = "codec.decoder" - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" -) - -// decReader abstracts the reading source, allowing implementations that can -// read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - readn(n int) []byte - readb([]byte) - readn1() uint8 - readUint16() uint16 - readUint32() uint32 - readUint64() uint64 -} - -type decDriver interface { - initReadNext() - tryDecodeAsNil() bool - currentEncodedType() valueType - isBuiltinType(rt uintptr) bool - decodeBuiltin(rt uintptr, v interface{}) - //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - decodeNaked() (v interface{}, vt valueType, decodeFurther bool) - decodeInt(bitsize uint8) (i int64) - decodeUint(bitsize uint8) (ui uint64) - decodeFloat(chkOverflow32 bool) (f float64) - decodeBool() (b bool) - // decodeString can also decode symbols - decodeString() (s string) - decodeBytes(bs []byte) (bsOut []byte, changed bool) - decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - readMapLen() int - readArrayLen() int -} - -type DecodeOptions struct { - // An instance of MapType is used during schema-less decoding of a map in the stream. - // If nil, we use map[interface{}]interface{} - MapType reflect.Type - // An instance of SliceType is used during schema-less decoding of an array in the stream. - // If nil, we use []interface{} - SliceType reflect.Type - // ErrorIfNoField controls whether an error is returned when decoding a map - // from a codec stream into a struct, and no matching struct field is found. 
- ErrorIfNoField bool -} - -// ------------------------------------ - -// ioDecReader is a decReader that reads off an io.Reader -type ioDecReader struct { - r io.Reader - br io.ByteReader - x [8]byte //temp byte array re-used internally for efficiency -} - -func (z *ioDecReader) readn(n int) (bs []byte) { - if n <= 0 { - return - } - bs = make([]byte, n) - if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { - panic(err) - } - return -} - -func (z *ioDecReader) readb(bs []byte) { - if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { - panic(err) - } -} - -func (z *ioDecReader) readn1() uint8 { - if z.br != nil { - b, err := z.br.ReadByte() - if err != nil { - panic(err) - } - return b - } - z.readb(z.x[:1]) - return z.x[0] -} - -func (z *ioDecReader) readUint16() uint16 { - z.readb(z.x[:2]) - return bigen.Uint16(z.x[:2]) -} - -func (z *ioDecReader) readUint32() uint32 { - z.readb(z.x[:4]) - return bigen.Uint32(z.x[:4]) -} - -func (z *ioDecReader) readUint64() uint64 { - z.readb(z.x[:8]) - return bigen.Uint64(z.x[:8]) -} - -// ------------------------------------ - -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available -} - -func (z *bytesDecReader) consume(n int) (oldcursor int) { - if z.a == 0 { - panic(io.EOF) - } - if n > z.a { - decErr("Trying to read %v bytes. Only %v available", n, z.a) - } - // z.checkAvailable(n) - oldcursor = z.c - z.c = oldcursor + n - z.a = z.a - n - return -} - -func (z *bytesDecReader) readn(n int) (bs []byte) { - if n <= 0 { - return - } - c0 := z.consume(n) - bs = z.b[c0:z.c] - return -} - -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readn(len(bs))) -} - -func (z *bytesDecReader) readn1() uint8 { - c0 := z.consume(1) - return z.b[c0] -} - -// Use binaryEncoding helper for 4 and 8 bits, but inline it for 2 bits -// creating temp slice variable and copying it to helper function is expensive -// for just 2 bits. 
- -func (z *bytesDecReader) readUint16() uint16 { - c0 := z.consume(2) - return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 -} - -func (z *bytesDecReader) readUint32() uint32 { - c0 := z.consume(4) - return bigen.Uint32(z.b[c0:z.c]) -} - -func (z *bytesDecReader) readUint64() uint64 { - c0 := z.consume(8) - return bigen.Uint64(z.b[c0:z.c]) -} - -// ------------------------------------ - -// decFnInfo has methods for registering handling decoding of a specific type -// based on some characteristics (builtin, extension, reflect Kind, etc) -type decFnInfo struct { - ti *typeInfo - d *Decoder - dd decDriver - xfFn func(reflect.Value, []byte) error - xfTag byte - array bool -} - -func (f *decFnInfo) builtin(rv reflect.Value) { - f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) -} - -func (f *decFnInfo) rawExt(rv reflect.Value) { - xtag, xbs := f.dd.decodeExt(false, 0) - rv.Field(0).SetUint(uint64(xtag)) - rv.Field(1).SetBytes(xbs) -} - -func (f *decFnInfo) ext(rv reflect.Value) { - _, xbs := f.dd.decodeExt(true, f.xfTag) - if fnerr := f.xfFn(rv, xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryUnmarshaler - if f.ti.unmIndir == -1 { - bm = rv.Addr().Interface().(binaryUnmarshaler) - } else if f.ti.unmIndir == 0 { - bm = rv.Interface().(binaryUnmarshaler) - } else { - for j, k := int8(0), f.ti.unmIndir; j < k; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryUnmarshaler) - } - xbs, _ := f.dd.decodeBytes(nil) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) kErr(rv reflect.Value) { - decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) -} - -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.dd.decodeString()) -} - -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.dd.decodeBool()) -} - -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(intBitsize)) -} - -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(64)) -} - -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(32)) -} - -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(8)) -} - -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(16)) -} - -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(true)) -} - -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(false)) -} - -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(8)) -} - -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(64)) -} - -func (f *decFnInfo) kUint(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUint32(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(32)) -} - -func (f *decFnInfo) kUint16(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(16)) -} - -// func (f *decFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called") -// if rv.IsNil() { -// rv.Set(reflect.New(rv.Type().Elem())) -// } -// f.d.decodeValue(rv.Elem()) -// } - -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - if !rv.IsNil() { - f.d.decodeValue(rv.Elem()) - return - } - // nil interface: - // use some hieristics to set the nil interface to an - // appropriate value based on the first byte read (byte descriptor bd) - v, vt, decodeFurther := f.dd.decodeNaked() - if vt == valueTypeNil { - return - } - // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) - // if non-nil value in stream. - if num := f.ti.rt.NumMethod(); num > 0 { - decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", - f.ti.rt, num) - } - var rvn reflect.Value - var useRvn bool - switch vt { - case valueTypeMap: - if f.d.h.MapType == nil { - var m2 map[interface{}]interface{} - v = &m2 - } else { - rvn = reflect.New(f.d.h.MapType).Elem() - useRvn = true - } - case valueTypeArray: - if f.d.h.SliceType == nil { - var m2 []interface{} - v = &m2 - } else { - rvn = reflect.New(f.d.h.SliceType).Elem() - useRvn = true - } - case valueTypeExt: - re := v.(*RawExt) - var bfn func(reflect.Value, []byte) error - rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) - if bfn == nil { - rvn = reflect.ValueOf(*re) - } else if fnerr := bfn(rvn, re.Data); fnerr != nil { - panic(fnerr) - } - rv.Set(rvn) - return - } - if decodeFurther { - if useRvn { - f.d.decodeValue(rvn) - } else if v != nil { - // this v is a pointer, so we need to dereference it when done - f.d.decode(v) - rvn = reflect.ValueOf(v).Elem() - useRvn = true - } - } - if useRvn { - rv.Set(rvn) - } else if v != nil { - rv.Set(reflect.ValueOf(v)) - } -} - -func (f *decFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { - containerLen := f.dd.readMapLen() - if containerLen == 0 { - return - } - tisfi := fti.sfi - for j := 0; j < containerLen; j++ { - // var rvkencname string - // ddecode(&rvkencname) - f.dd.initReadNext() - rvkencname := f.dd.decodeString() - // rvksi := ti.getForEncName(rvkencname) - if k := fti.indexForEncName(rvkencname); k > -1 { - sfik := tisfi[k] - if sfik.i != -1 { - f.d.decodeValue(rv.Field(int(sfik.i))) - } else { - f.d.decEmbeddedField(rv, sfik.is) - } - // f.d.decodeValue(ti.field(k, rv)) - } else { - if f.d.h.ErrorIfNoField { - decErr("No matching struct field found when decoding stream map with key: %v", - rvkencname) - } else { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) - } - } - } - } else if currEncodedType == valueTypeArray { - containerLen := f.dd.readArrayLen() - if containerLen == 0 { - return - } - for j, si := range fti.sfip { - if j == containerLen { - break - } - if si.i != -1 { - f.d.decodeValue(rv.Field(int(si.i))) - } else { - f.d.decEmbeddedField(rv, si.is) - } - } - if containerLen > len(fti.sfip) { - // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) - } - } - } else { - decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", - currEncodedType) - } -} - -func (f *decFnInfo) kSlice(rv reflect.Value) { - // A slice can be set from a map or array in stream. 
- currEncodedType := f.dd.currentEncodedType() - - switch currEncodedType { - case valueTypeBytes, valueTypeString: - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { - rv.SetBytes(bs2) - } - return - } - } - - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case intfSliceTypId: - f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) - return - case uint64SliceTypId: - f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) - return - case int64SliceTypId: - f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) - return - case strSliceTypId: - f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) - return - } - } - - containerLen, containerLenS := decContLens(f.dd, currEncodedType) - - // an array can never return a nil slice. so no need to check f.array here. - - if rv.IsNil() { - rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) - } - - if containerLen == 0 { - return - } - - if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap { - if f.array { // !rv.CanSet() - decErr(msgDecCannotExpandArr, rvcap, containerLenS) - } - rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) - if rvlen > 0 { - reflect.Copy(rvn, rv) - } - rv.Set(rvn) - } else if containerLenS > rvlen { - rv.SetLen(containerLenS) - } - - for j := 0; j < containerLenS; j++ { - f.d.decodeValue(rv.Index(j)) - } -} - -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) -} - -func (f *decFnInfo) kMap(rv reflect.Value) { - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case mapStrIntfTypId: - f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) - return - case mapIntfIntfTypId: - f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) - return - case mapInt64IntfTypId: - f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) - return - case mapUint64IntfTypId: - f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) - return - } - } - - containerLen := f.dd.readMapLen() - - if rv.IsNil() { - rv.Set(reflect.MakeMap(f.ti.rt)) - } - - if containerLen == 0 { - return - } - - ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() - for j := 0; j < containerLen; j++ { - rvk := reflect.New(ktype).Elem() - f.d.decodeValue(rvk) - - // special case if a byte array. - // if ktype == intfTyp { - if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(string(rvk.Bytes())) - } - } - rvv := rv.MapIndex(rvk) - if !rvv.IsValid() { - rvv = reflect.New(vtype).Elem() - } - - f.d.decodeValue(rvv) - rv.SetMapIndex(rvk, rvv) - } -} - -// ---------------------------------------- - -type decFn struct { - i *decFnInfo - f func(*decFnInfo, reflect.Value) -} - -// A Decoder reads and decodes an object from an input stream in the codec format. -type Decoder struct { - r decReader - d decDriver - h *BasicHandle - f map[uintptr]decFn - x []uintptr - s []decFn -} - -// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Reader, bytes.Buffer). 
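A hedged sketch of the buffered-reader pattern recommended in the NewDecoder comment above, also showing the embedded DecodeOptions fields being set on the handle; the file name is a placeholder:

package main

import (
	"bufio"
	"os"
	"reflect"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	f, err := os.Open("payload.binc") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	h := new(codec.BincHandle)
	// DecodeOptions is embedded in BasicHandle, so its fields are set on the handle.
	h.ErrorIfNoField = true                                 // unknown struct keys become errors
	h.MapType = reflect.TypeOf(map[string]interface{}(nil)) // schema-less maps decode as map[string]interface{}

	// Wrap the file in a buffered reader, as recommended above; bufio.Reader
	// also provides io.ByteReader, which the decoder can take advantage of.
	dec := codec.NewDecoder(bufio.NewReader(f), h)

	var v interface{}
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	_ = v
}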
-func NewDecoder(r io.Reader, h Handle) *Decoder { - z := ioDecReader{ - r: r, - } - z.br, _ = r.(io.ByteReader) - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} -} - -// NewDecoderBytes returns a Decoder which efficiently decodes directly -// from a byte slice with zero copying. -func NewDecoderBytes(in []byte, h Handle) *Decoder { - z := bytesDecReader{ - b: in, - a: len(in), - } - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} -} - -// Decode decodes the stream from reader and stores the result in the -// value pointed to by v. v cannot be a nil pointer. v can also be -// a reflect.Value of a pointer. -// -// Note that a pointer to a nil interface is not a nil pointer. -// If you do not know what type of stream it is, pass in a pointer to a nil interface. -// We will decode and store a value in that nil interface. -// -// Sample usages: -// // Decoding into a non-nil typed value -// var f float32 -// err = codec.NewDecoder(r, handle).Decode(&f) -// -// // Decoding into nil interface -// var v interface{} -// dec := codec.NewDecoder(r, handle) -// err = dec.Decode(&v) -// -// When decoding into a nil interface{}, we will decode into an appropriate value based -// on the contents of the stream: -// - Numbers are decoded as float64, int64 or uint64. -// - Other values are decoded appropriately depending on the type: -// bool, string, []byte, time.Time, etc -// - Extensions are decoded as RawExt (if no ext function registered for the tag) -// Configurations exist on the Handle to override defaults -// (e.g. for MapType, SliceType and how to decode raw bytes). -// -// When decoding into a non-nil interface{} value, the mode of encoding is based on the -// type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error -// - Else decode it based on its reflect.Kind -// -// There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. -// - A map can be decoded from a stream map, by updating matching keys. -// - A slice can be decoded from a stream array, -// by updating the first n elements, where n is length of the stream. -// - A slice can be decoded from a stream map, by decoding as if -// it contains a sequence of key-value pairs. -// - A struct can be decoded from a stream map, by updating matching fields. -// - A struct can be decoded from a stream array, -// by updating fields as they occur in the struct (by index). -// -// When decoding a stream map or array with length of 0 into a nil map or slice, -// we reset the destination map or slice to a zero-length value. -// -// However, when decoding a stream nil, we reset the destination container -// to its "zero" value (e.g. nil for slice/map, etc). 
-// -func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) - return -} - -func (d *Decoder) decode(iv interface{}) { - d.d.initReadNext() - - switch v := iv.(type) { - case nil: - decErr("Cannot decode into nil.") - - case reflect.Value: - d.chkPtrValue(v) - d.decodeValue(v.Elem()) - - case *string: - *v = d.d.decodeString() - case *bool: - *v = d.d.decodeBool() - case *int: - *v = int(d.d.decodeInt(intBitsize)) - case *int8: - *v = int8(d.d.decodeInt(8)) - case *int16: - *v = int16(d.d.decodeInt(16)) - case *int32: - *v = int32(d.d.decodeInt(32)) - case *int64: - *v = d.d.decodeInt(64) - case *uint: - *v = uint(d.d.decodeUint(uintBitsize)) - case *uint8: - *v = uint8(d.d.decodeUint(8)) - case *uint16: - *v = uint16(d.d.decodeUint(16)) - case *uint32: - *v = uint32(d.d.decodeUint(32)) - case *uint64: - *v = d.d.decodeUint(64) - case *float32: - *v = float32(d.d.decodeFloat(true)) - case *float64: - *v = d.d.decodeFloat(false) - case *[]byte: - *v, _ = d.d.decodeBytes(*v) - - case *[]interface{}: - d.decSliceIntf(v, valueTypeInvalid, false) - case *[]uint64: - d.decSliceUint64(v, valueTypeInvalid, false) - case *[]int64: - d.decSliceInt64(v, valueTypeInvalid, false) - case *[]string: - d.decSliceStr(v, valueTypeInvalid, false) - case *map[string]interface{}: - d.decMapStrIntf(v) - case *map[interface{}]interface{}: - d.decMapIntfIntf(v) - case *map[uint64]interface{}: - d.decMapUint64Intf(v) - case *map[int64]interface{}: - d.decMapInt64Intf(v) - - case *interface{}: - d.decodeValue(reflect.ValueOf(iv).Elem()) - - default: - rv := reflect.ValueOf(iv) - d.chkPtrValue(rv) - d.decodeValue(rv.Elem()) - } -} - -func (d *Decoder) decodeValue(rv reflect.Value) { - d.d.initReadNext() - - if d.d.tryDecodeAsNil() { - // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) - if rv.Kind() == reflect.Ptr { - if !rv.IsNil() { - rv.Set(reflect.Zero(rv.Type())) - } - return - } - // for rv.Kind() == reflect.Ptr { - // rv = rv.Elem() - // } - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } - return - } - - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // retrieve or register a focus'ed function for this type - // to eliminate need to do the retrieval multiple times - - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var fn decFn - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i, v := range d.x { - if v == rtid { - fn, ok = d.s[i], true - break - } - } - } - if !ok { - // debugf("\tCreating new dec fn for type: %v\n", rt) - fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} - fn.i = &fi - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value except even if the container registers a length of 0. 
- if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if d.d.isBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*decFnInfo).ext - } else if supportBinaryMarshal && fi.ti.unm { - fn.f = (*decFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Slice: - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.array = true - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } - } - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]decFn, 16) - } - d.f[rtid] = fn - } else { - d.s = append(d.s, fn) - d.x = append(d.x, rtid) - } - } - - fn.f(fn.i, rv) - - return -} - -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - return - } - if !rv.IsValid() { - decErr("Cannot decode into a zero (ie invalid) reflect.Value") - } - if !rv.CanInterface() { - decErr("Cannot decode into a value without an interface: %v", rv) - } - rvi := rv.Interface() - decErr("Cannot decode into non-pointer or nil pointer. 
Got: %v, %T, %v", - rv.Kind(), rvi, rvi) -} - -func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { - // d.decodeValue(rv.FieldByIndex(index)) - // nil pointers may be here; so reproduce FieldByIndex logic + enhancements - for _, j := range index { - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - // If a pointer, it must be a pointer to struct (based on typeInfo contract) - rv = rv.Elem() - } - rv = rv.Field(j) - } - d.decodeValue(rv) -} - -// -------------------------------------------------- - -// short circuit functions for common maps and slices - -func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]interface{}, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]interface{}, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - d.decode(&s[j]) - } - *v = s -} - -func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]int64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]int64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeInt(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]uint64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]uint64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeUint(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]string, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]string, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeString() - } - *v = s -} - -func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[interface{}]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - var mk interface{} - d.decode(&mk) - // special case if a byte array. 
- if bv, bok := mk.([]byte); bok { - mk = string(bv) - } - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[int64]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeInt(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[uint64]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeUint(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[string]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeString() - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -// ---------------------------------------- - -func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { - if currEncodedType == valueTypeInvalid { - currEncodedType = dd.currentEncodedType() - } - switch currEncodedType { - case valueTypeArray: - containerLen = dd.readArrayLen() - containerLenS = containerLen - case valueTypeMap: - containerLen = dd.readMapLen() - containerLenS = containerLen * 2 - default: - decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", - currEncodedType) - } - return -} - -func decErr(format string, params ...interface{}) { - doPanic(msgTagDec, format, params...) -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go deleted file mode 100644 index 4914be0c74..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go +++ /dev/null @@ -1,1001 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "io" - "reflect" -) - -const ( - // Some tagging information for error messages. - msgTagEnc = "codec.encoder" - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 - // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 -) - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. - // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe - - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota - - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag - - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracting writing to a byte array or to an io.Writer. 
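A small sketch of configuring the symbol flags described above through the handle's embedded EncodeOptions; the sample struct and field names are illustrative only:

package main

import "github.com/hashicorp/go-msgpack/codec"

func main() {
	h := new(codec.BincHandle)

	// EncodeOptions is embedded in BasicHandle, so the symbol policy is set
	// directly on the handle; the *Flag values combine with bitwise OR.
	h.AsSymbols = codec.AsSymbolMapStringKeysFlag | codec.AsSymbolStructFieldNameFlag

	// StructToArray would encode every struct as an array rather than a map.
	h.StructToArray = false

	type point struct{ X, Y int }
	var out []byte
	if err := codec.NewEncoderBytes(&out, h).Encode(point{1, 2}); err != nil {
		panic(err)
	}
	_ = out
}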
-type encWriter interface { - writeUint16(uint16) - writeUint32(uint32) - writeUint64(uint64) - writeb([]byte) - writestr(string) - writen1(byte) - writen2(byte, byte) - atEndOfEncode() -} - -// encDriver abstracts the actual codec (binc vs msgpack, etc) -type encDriver interface { - isBuiltinType(rt uintptr) bool - encodeBuiltin(rt uintptr, v interface{}) - encodeNil() - encodeInt(i int64) - encodeUint(i uint64) - encodeBool(b bool) - encodeFloat32(f float32) - encodeFloat64(f float64) - encodeExtPreamble(xtag byte, length int) - encodeArrayPreamble(length int) - encodeMapPreamble(length int) - encodeString(c charEncoding, v string) - encodeSymbol(v string) - encodeStringBytes(c charEncoding, v []byte) - //TODO - //encBignum(f *big.Int) - //encStringRunes(c charEncoding, v []rune) -} - -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) -} - -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) -} - -type EncodeOptions struct { - // Encode a struct as an array, and not as a map. - StructToArray bool - - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. - // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeys - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag -} - -// --------------------------------------------- - -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter -} - -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - _, err = o.w.Write([]byte{c}) - return -} - -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - return o.w.Write([]byte(s)) -} - -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} - -// ---------------------------------------- - -// ioEncWriter implements encWriter and can write to an io.Writer implementation -type ioEncWriter struct { - w ioEncWriterWriter - x [8]byte // temp byte array re-used internally for efficiency -} - -func (z *ioEncWriter) writeUint16(v uint16) { - bigen.PutUint16(z.x[:2], v) - z.writeb(z.x[:2]) -} - -func (z *ioEncWriter) writeUint32(v uint32) { - bigen.PutUint32(z.x[:4], v) - z.writeb(z.x[:4]) -} - -func (z *ioEncWriter) writeUint64(v uint64) { - bigen.PutUint64(z.x[:8], v) - z.writeb(z.x[:8]) -} - -func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { - panic(err) - } - if n != len(bs) { - encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n) - } -} - -func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { - panic(err) - } - if n != len(s) { - encErr("write: Incorrect num bytes written. 
Expecting: %v, Wrote: %v", len(s), n) - } -} - -func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) -} - -func (z *ioEncWriter) atEndOfEncode() {} - -// ---------------------------------------- - -// bytesEncWriter implements encWriter and can write to an byte slice. -// It is used by Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode -} - -func (z *bytesEncWriter) writeUint16(v uint16) { - c := z.grow(2) - z.b[c] = byte(v >> 8) - z.b[c+1] = byte(v) -} - -func (z *bytesEncWriter) writeUint32(v uint32) { - c := z.grow(4) - z.b[c] = byte(v >> 24) - z.b[c+1] = byte(v >> 16) - z.b[c+2] = byte(v >> 8) - z.b[c+3] = byte(v) -} - -func (z *bytesEncWriter) writeUint64(v uint64) { - c := z.grow(8) - z.b[c] = byte(v >> 56) - z.b[c+1] = byte(v >> 48) - z.b[c+2] = byte(v >> 40) - z.b[c+3] = byte(v >> 32) - z.b[c+4] = byte(v >> 24) - z.b[c+5] = byte(v >> 16) - z.b[c+6] = byte(v >> 8) - z.b[c+7] = byte(v) -} - -func (z *bytesEncWriter) writeb(s []byte) { - if len(s) == 0 { - return - } - c := z.grow(len(s)) - copy(z.b[c:], s) -} - -func (z *bytesEncWriter) writestr(s string) { - c := z.grow(len(s)) - copy(z.b[c:], s) -} - -func (z *bytesEncWriter) writen1(b1 byte) { - c := z.grow(1) - z.b[c] = b1 -} - -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - c := z.grow(2) - z.b[c] = b1 - z.b[c+1] = b2 -} - -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] -} - -func (z *bytesEncWriter) grow(n int) (oldcursor int) { - oldcursor = z.c - z.c = oldcursor + n - if z.c > cap(z.b) { - // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). - // However, it was too expensive, causing too many iterations of copy. 
- // Using bytes.Buffer model was much better (2*cap + n) - bs := make([]byte, 2*cap(z.b)+n) - copy(bs, z.b[:oldcursor]) - z.b = bs - } else if z.c > len(z.b) { - z.b = z.b[:cap(z.b)] - } - return -} - -// --------------------------------------------- - -type encFnInfo struct { - ti *typeInfo - e *Encoder - ee encDriver - xfFn func(reflect.Value) ([]byte, error) - xfTag byte -} - -func (f *encFnInfo) builtin(rv reflect.Value) { - f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) -} - -func (f *encFnInfo) rawExt(rv reflect.Value) { - f.e.encRawExt(rv.Interface().(RawExt)) -} - -func (f *encFnInfo) ext(rv reflect.Value) { - bs, fnerr := f.xfFn(rv) - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - return - } - if f.e.hh.writeExt() { - f.ee.encodeExtPreamble(f.xfTag, len(bs)) - f.e.w.writeb(bs) - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } - -} - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryMarshaler - if f.ti.mIndir == 0 { - bm = rv.Interface().(binaryMarshaler) - } else if f.ti.mIndir == -1 { - bm = rv.Addr().Interface().(binaryMarshaler) - } else { - for j, k := int8(0), f.ti.mIndir; j < k; j++ { - if rv.IsNil() { - f.ee.encodeNil() - return - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryMarshaler) - } - // debugf(">>>> binaryMarshaler: %T", rv.Interface()) - bs, fnerr := bm.MarshalBinary() - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } -} - -func (f *encFnInfo) kBool(rv reflect.Value) { - f.ee.encodeBool(rv.Bool()) -} - -func (f *encFnInfo) kString(rv reflect.Value) { - f.ee.encodeString(c_UTF8, rv.String()) -} - -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.ee.encodeFloat64(rv.Float()) -} - -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.ee.encodeFloat32(float32(rv.Float())) -} - -func (f *encFnInfo) kInt(rv reflect.Value) { - f.ee.encodeInt(rv.Int()) -} - -func (f *encFnInfo) kUint(rv reflect.Value) { - f.ee.encodeUint(rv.Uint()) -} - -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.ee.encodeNil() -} - -func (f *encFnInfo) kErr(rv reflect.Value) { - encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) -} - -func (f *encFnInfo) kSlice(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case intfSliceTypId: - f.e.encSliceIntf(rv.Interface().([]interface{})) - return - case strSliceTypId: - f.e.encSliceStr(rv.Interface().([]string)) - return - case uint64SliceTypId: - f.e.encSliceUint64(rv.Interface().([]uint64)) - return - case int64SliceTypId: - f.e.encSliceInt64(rv.Interface().([]int64)) - return - } - } - - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - f.ee.encodeStringBytes(c_RAW, rv.Bytes()) - return - } - - l := rv.Len() - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } -} - -func (f *encFnInfo) kArray(rv reflect.Value) { - // We cannot share kSlice method, because the array may be non-addressable. - // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". 
- // So we have to duplicate the functionality here. - // f.e.encodeValue(rv.Slice(0, rv.Len())) - // f.kSlice(rv.Slice(0, rv.Len())) - - l := rv.Len() - // Handle an array of bytes specially (in line with what is done for slices) - if f.ti.rt.Elem().Kind() == reflect.Uint8 { - if l == 0 { - f.ee.encodeStringBytes(c_RAW, nil) - return - } - var bs []byte - if rv.CanAddr() { - bs = rv.Slice(0, l).Bytes() - } else { - bs = make([]byte, l) - for i := 0; i < l; i++ { - bs[i] = byte(rv.Index(i).Uint()) - } - } - f.ee.encodeStringBytes(c_RAW, bs) - return - } - - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } -} - -func (f *encFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - newlen := len(fti.sfi) - rvals := make([]reflect.Value, newlen) - var encnames []string - e := f.e - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) - if toMap { - tisfi = fti.sfi - encnames = make([]string, newlen) - } - newlen = 0 - for _, si := range tisfi { - if si.i != -1 { - rvals[newlen] = rv.Field(int(si.i)) - } else { - rvals[newlen] = rv.FieldByIndex(si.is) - } - if toMap { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - continue - } - encnames[newlen] = si.encName - } else { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - rvals[newlen] = reflect.Value{} //encode as nil - } - } - newlen++ - } - - // debugf(">>>> kStruct: newlen: %v", newlen) - if toMap { - ee := f.ee //don't dereference everytime - ee.encodeMapPreamble(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - if asSymbols { - ee.encodeSymbol(encnames[j]) - } else { - ee.encodeString(c_UTF8, encnames[j]) - } - e.encodeValue(rvals[j]) - } - } else { - f.ee.encodeArrayPreamble(newlen) - for j := 0; j < newlen; j++ { - e.encodeValue(rvals[j]) - } - } -} - -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.ee.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - f.e.encodeValue(rv.Elem()) -} - -func (f *encFnInfo) kMap(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case mapIntfIntfTypId: - f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) - return - case mapStrIntfTypId: - f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) - return - case mapStrStrTypId: - f.e.encMapStrStr(rv.Interface().(map[string]string)) - return - case mapInt64IntfTypId: - f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) - return - case mapUint64IntfTypId: - f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) - return - } - } - - l := rv.Len() - f.ee.encodeMapPreamble(l) - if l == 0 { - return - } - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - keyTypeIsString := f.ti.rt.Key() == stringTyp - var asSymbols bool - if keyTypeIsString { - asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } - mks := rv.MapKeys() - // for j, lmks := 0, len(mks); j < lmks; j++ { - for j := range mks { - if keyTypeIsString { - if asSymbols { - f.ee.encodeSymbol(mks[j].String()) - } else { - f.ee.encodeString(c_UTF8, mks[j].String()) - } - } else { - f.e.encodeValue(mks[j]) - } - f.e.encodeValue(rv.MapIndex(mks[j])) - } - -} - -// -------------------------------------------------- - -// encFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type encFn struct { - i *encFnInfo - f func(*encFnInfo, reflect.Value) -} - -// -------------------------------------------------- - -// An Encoder writes an object to an output stream in the codec format. -type Encoder struct { - w encWriter - e encDriver - h *BasicHandle - hh Handle - f map[uintptr]encFn - x []uintptr - s []encFn -} - -// NewEncoder returns an Encoder for encoding into an io.Writer. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Writer, bytes.Buffer). -func NewEncoder(w io.Writer, h Handle) *Encoder { - ww, ok := w.(ioEncWriterWriter) - if !ok { - sww := simpleIoEncWriterWriter{w: w} - sww.bw, _ = w.(io.ByteWriter) - sww.sw, _ = w.(ioEncStringWriter) - ww = &sww - //ww = bufio.NewWriterSize(w, defEncByteBufSize) - } - z := ioEncWriter{ - w: ww, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} -} - -// NewEncoderBytes returns an encoder for encoding directly and efficiently -// into a byte slice, using zero-copying to temporary slices. -// -// It will potentially replace the output byte slice pointed to. -// After encoding, the out parameter contains the encoded contents. -func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - in := *out - if in == nil { - in = make([]byte, defEncByteBufSize) - } - z := bytesEncWriter{ - b: in, - out: out, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} -} - -// Encode writes an object into a stream in the codec format. -// -// Encoding can be configured via the "codec" struct tag for the fields. -// -// The "codec" key in struct field's tag value is the key name, -// followed by an optional comma and options. 
-// -// To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. -// -// Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's codec tag is "-", OR -// - the field is empty and its codec tag specifies the "omitempty" option. -// -// When encoding as a map, the first string in the tag (before the comma) -// is the map key string to use when encoding. -// -// However, struct values may encode as arrays. This happens when: -// - StructToArray Encode option is set, OR -// - the codec tag on the _struct field sets the "toarray" option -// -// Values with types that implement MapBySlice are encoded as stream maps. -// -// The empty values (for omitempty option) are false, 0, any nil pointer -// or interface value, and any array, slice, map, or string of length zero. -// -// Anonymous fields are encoded inline if no struct tag is present. -// Else they are encoded as regular fields. -// -// Examples: -// -// type MyStruct struct { -// _struct bool `codec:",omitempty"` //set omitempty for every field -// Field1 string `codec:"-"` //skip this field -// Field2 int `codec:"myName"` //Use key "myName" in encode stream -// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. -// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. -// ... -// } -// -// type MyStruct struct { -// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field -// //and encode struct as an array -// } -// -// The mode of encoding is based on the type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) -// - Else encode it based on its reflect.Kind -// -// Note that struct field names and keys in map[string]XXX will be treated as symbols. -// Some formats support symbols (e.g. binc) and will properly encode the string -// only once in the stream, and use a tag to refer to it thereafter. 
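A short, illustrative use of the "codec" struct tags and the buffered-writer guidance described above; the type and field names are invented for the example:

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

// AccountRecord is a hypothetical type illustrating the struct tags described
// above: a renamed key, a skipped field, and omitempty.
type AccountRecord struct {
	Name    string `codec:"name"`       // use key "name" in the encoded map
	Secret  string `codec:"-"`          // never encoded
	Retries int    `codec:",omitempty"` // key "Retries"; omitted when zero
}

func main() {
	// bytes.Buffer is a memory-buffered writer, as the NewEncoder comment recommends.
	var buf bytes.Buffer
	enc := codec.NewEncoder(&buf, new(codec.BincHandle))
	if err := enc.Encode(AccountRecord{Name: "alice", Secret: "hidden"}); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", buf.Len())
}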
-func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() - return -} - -func (e *Encoder) encode(iv interface{}) { - switch v := iv.(type) { - case nil: - e.e.encodeNil() - - case reflect.Value: - e.encodeValue(v) - - case string: - e.e.encodeString(c_UTF8, v) - case bool: - e.e.encodeBool(v) - case int: - e.e.encodeInt(int64(v)) - case int8: - e.e.encodeInt(int64(v)) - case int16: - e.e.encodeInt(int64(v)) - case int32: - e.e.encodeInt(int64(v)) - case int64: - e.e.encodeInt(v) - case uint: - e.e.encodeUint(uint64(v)) - case uint8: - e.e.encodeUint(uint64(v)) - case uint16: - e.e.encodeUint(uint64(v)) - case uint32: - e.e.encodeUint(uint64(v)) - case uint64: - e.e.encodeUint(v) - case float32: - e.e.encodeFloat32(v) - case float64: - e.e.encodeFloat64(v) - - case []interface{}: - e.encSliceIntf(v) - case []string: - e.encSliceStr(v) - case []int64: - e.encSliceInt64(v) - case []uint64: - e.encSliceUint64(v) - case []uint8: - e.e.encodeStringBytes(c_RAW, v) - - case map[interface{}]interface{}: - e.encMapIntfIntf(v) - case map[string]interface{}: - e.encMapStrIntf(v) - case map[string]string: - e.encMapStrStr(v) - case map[int64]interface{}: - e.encMapInt64Intf(v) - case map[uint64]interface{}: - e.encMapUint64Intf(v) - - case *string: - e.e.encodeString(c_UTF8, *v) - case *bool: - e.e.encodeBool(*v) - case *int: - e.e.encodeInt(int64(*v)) - case *int8: - e.e.encodeInt(int64(*v)) - case *int16: - e.e.encodeInt(int64(*v)) - case *int32: - e.e.encodeInt(int64(*v)) - case *int64: - e.e.encodeInt(*v) - case *uint: - e.e.encodeUint(uint64(*v)) - case *uint8: - e.e.encodeUint(uint64(*v)) - case *uint16: - e.e.encodeUint(uint64(*v)) - case *uint32: - e.e.encodeUint(uint64(*v)) - case *uint64: - e.e.encodeUint(*v) - case *float32: - e.e.encodeFloat32(*v) - case *float64: - e.e.encodeFloat64(*v) - - case *[]interface{}: - e.encSliceIntf(*v) - case *[]string: - e.encSliceStr(*v) - case *[]int64: - e.encSliceInt64(*v) - case *[]uint64: - e.encSliceUint64(*v) - case *[]uint8: - e.e.encodeStringBytes(c_RAW, *v) - - case *map[interface{}]interface{}: - e.encMapIntfIntf(*v) - case *map[string]interface{}: - e.encMapStrIntf(*v) - case *map[string]string: - e.encMapStrStr(*v) - case *map[int64]interface{}: - e.encMapInt64Intf(*v) - case *map[uint64]interface{}: - e.encMapUint64Intf(*v) - - default: - e.encodeValue(reflect.ValueOf(iv)) - } -} - -func (e *Encoder) encodeValue(rv reflect.Value) { - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - e.e.encodeNil() - return - } - rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } - var fn encFn - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i, v := range e.x { - if v == rtid { - fn, ok = e.s[i], true - break - } - } - } - if !ok { - // debugf("\tCreating new enc fn for type: %v\n", rt) - fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} - fn.i = &fi - if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.isBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*encFnInfo).ext - } else if supportBinaryMarshal && fi.ti.m { - fn.f = (*encFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - 
fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Slice: - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fn.f = (*encFnInfo).kArray - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - // case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]encFn, 16) - } - e.f[rtid] = fn - } else { - e.s = append(e.s, fn) - e.x = append(e.x, rtid) - } - } - - fn.f(fn.i, rv) - -} - -func (e *Encoder) encRawExt(re RawExt) { - if re.Data == nil { - e.e.encodeNil() - return - } - if e.hh.writeExt() { - e.e.encodeExtPreamble(re.Tag, len(re.Data)) - e.w.writeb(re.Data) - } else { - e.e.encodeStringBytes(c_RAW, re.Data) - } -} - -// --------------------------------------------- -// short circuit functions for common maps and slices - -func (e *Encoder) encSliceIntf(v []interface{}) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.encode(v2) - } -} - -func (e *Encoder) encSliceStr(v []string) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeString(c_UTF8, v2) - } -} - -func (e *Encoder) encSliceInt64(v []int64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeInt(v2) - } -} - -func (e *Encoder) encSliceUint64(v []uint64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeUint(v2) - } -} - -func (e *Encoder) encMapStrStr(v map[string]string) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.e.encodeString(c_UTF8, v2) - } -} - -func (e *Encoder) encMapStrIntf(v map[string]interface{}) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.encode(v2) - } -} - -func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeInt(k2) - e.encode(v2) - } -} - -func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeUint(uint64(k2)) - e.encode(v2) - } -} - -func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.encode(k2) - e.encode(v2) - } -} - -// ---------------------------------------- - -func encErr(format string, params ...interface{}) { - doPanic(msgTagEnc, format, params...) -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go deleted file mode 100644 index e6dc0563f0..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// Contains code shared by both encode and decode. 
- -import ( - "encoding/binary" - "fmt" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" - "unicode" - "unicode/utf8" -) - -const ( - structTagName = "codec" - - // Support - // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) - // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error - // This constant flag will enable or disable it. - supportBinaryMarshal = true - - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - useMapForCodecCache = false - - // For some common container types, we can short-circuit an elaborate - // reflection dance and call encode/decode directly. - // The currently supported types are: - // - slices of strings, or id's (int64,uint64) or interfaces. - // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf - shortCircuitReflectToFastPath = true - - // for debugging, set this to false, to catch panic traces. - // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. - recoverPanicToErr = true -) - -type charEncoding uint8 - -const ( - c_RAW charEncoding = iota - c_UTF8 - c_UTF16LE - c_UTF16BE - c_UTF32LE - c_UTF32BE -) - -// valueType is the stream type -type valueType uint8 - -const ( - valueTypeUnset valueType = iota - valueTypeNil - valueTypeInt - valueTypeUint - valueTypeFloat - valueTypeBool - valueTypeString - valueTypeSymbol - valueTypeBytes - valueTypeMap - valueTypeArray - valueTypeTimestamp - valueTypeExt - - valueTypeInvalid = 0xff -) - -var ( - bigen = binary.BigEndian - structInfoFieldName = "_struct" - - cachedTypeInfo = make(map[uintptr]*typeInfo, 4) - cachedTypeInfoMutex sync.RWMutex - - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - strSliceTyp = reflect.TypeOf([]string(nil)) - boolSliceTyp = reflect.TypeOf([]bool(nil)) - uintSliceTyp = reflect.TypeOf([]uint(nil)) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - uint16SliceTyp = reflect.TypeOf([]uint16(nil)) - uint32SliceTyp = reflect.TypeOf([]uint32(nil)) - uint64SliceTyp = reflect.TypeOf([]uint64(nil)) - intSliceTyp = reflect.TypeOf([]int(nil)) - int8SliceTyp = reflect.TypeOf([]int8(nil)) - int16SliceTyp = reflect.TypeOf([]int16(nil)) - int32SliceTyp = reflect.TypeOf([]int32(nil)) - int64SliceTyp = reflect.TypeOf([]int64(nil)) - float32SliceTyp = reflect.TypeOf([]float32(nil)) - float64SliceTyp = reflect.TypeOf([]float64(nil)) - - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) - - mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) - mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) - mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) - mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() - - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = 
reflect.ValueOf(timeTyp).Pointer() - - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() - - boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() - uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() - uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() - uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() - intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() - int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() - int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() - int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() - int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() - float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() - float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() - - mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() - mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() - mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() - mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() - // Id = reflect.ValueOf().Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() - binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} - bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} -) - -type binaryUnmarshaler interface { - UnmarshalBinary(data []byte) error -} - -type binaryMarshaler interface { - MarshalBinary() (data []byte, err error) -} - -// MapBySlice represents a slice which should be encoded as a map in the stream. -// The slice contains a sequence of key-value pairs. -type MapBySlice interface { - MapBySlice() -} - -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// -// BasicHandle encapsulates the common options and extension functions. -type BasicHandle struct { - extHandle - EncodeOptions - DecodeOptions -} - -// Handle is the interface for a specific encoding format. -// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - writeExt() bool - getBasicHandle() *BasicHandle - newEncDriver(w encWriter) encDriver - newDecDriver(r decReader) decDriver -} - -// RawExt represents raw unprocessed extension data. -type RawExt struct { - Tag byte - Data []byte -} - -type extTypeTagFn struct { - rtid uintptr - rt reflect.Type - tag byte - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} - -type extHandle []*extTypeTagFn - -// AddExt registers an encode and decode function for a reflect.Type. -// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. 
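For context, a minimal sketch of how the extension hook documented above was typically wired up, assuming a hypothetical named type MyTime and extension tag 1 (illustrative only, not taken from the removed files):

package main

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"

	"github.com/hashicorp/go-msgpack/codec"
)

// MyTime is a hypothetical named (non-pointer, non-interface) type,
// which is what AddExt requires.
type MyTime struct{ Unix int64 }

func main() {
	h := new(codec.MsgpackHandle)
	h.WriteExt = true // emit real ext tags rather than raw bytes

	// encfn/decfn convert the value to and from the raw bytes stored
	// under the extension tag.
	encfn := func(rv reflect.Value) ([]byte, error) {
		return []byte(strconv.FormatInt(rv.Interface().(MyTime).Unix, 10)), nil
	}
	decfn := func(rv reflect.Value, bs []byte) error {
		u, err := strconv.ParseInt(string(bs), 10, 64)
		if err != nil {
			return err
		}
		rv.Set(reflect.ValueOf(MyTime{Unix: u}))
		return nil
	}
	// AddExt is promoted from the embedded extHandle via BasicHandle.
	if err := h.AddExt(reflect.TypeOf(MyTime{}), 1, encfn, decfn); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, h).Encode(MyTime{Unix: 1500000000}); err != nil {
		panic(err)
	}
	fmt.Printf("encoded: % x\n", buf.Bytes())

	var out MyTime
	if err := codec.NewDecoder(&buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("decoded:", out.Unix)
}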
-func (o *extHandle) AddExt( - rt reflect.Type, - tag byte, - encfn func(reflect.Value) ([]byte, error), - decfn func(reflect.Value, []byte) error, -) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } - - // o cannot be nil, since it is always embedded in a Handle. - // if nil, let it panic. - // if o == nil { - // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") - // return - // } - - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.encFn, v.decFn = tag, encfn, decfn - return - } - } - - *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) - return -} - -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - for _, v := range o { - if v.rtid == rtid { - return v - } - } - return nil -} - -func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { - for _, v := range o { - if v.tag == tag { - return v - } - } - return nil -} - -func (o extHandle) getDecodeExtForTag(tag byte) ( - rv reflect.Value, fn func(reflect.Value, []byte) error) { - if x := o.getExtForTag(tag); x != nil { - // ext is only registered for base - rv = reflect.New(x.rt).Elem() - fn = x.decFn - } - return -} - -func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.decFn - } - return -} - -func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.encFn - } - return -} - -type structFieldInfo struct { - encName string // encode name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? - - // tag string // tag - // name string // field name - // encNameBs []byte // encoded name as byte stream - // ikind int // kind of the field as an int i.e. int(reflect.Kind) -} - -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - if fname == "" { - panic("parseStructFieldInfo: No Field Name") - } - si := structFieldInfo{ - // name: fname, - encName: fname, - // tag: stag, - } - - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - switch s { - case "omitempty": - si.omitEmpty = true - case "toarray": - si.toArray = true - } - } - } - } - // si.encNameBs = []byte(si.encName) - return &si -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { - return len(p) -} - -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName -} - -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -// typeInfo keeps information about each type referenced in the encode/decode sequence. -// -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. 
Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. - - rt reflect.Type - rtid uintptr - - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. - base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - mbs bool // base type (T or *T) is a MapBySlice - - m bool // base type (T or *T) is a binaryMarshaler - unm bool // base type (T or *T) is a binaryUnmarshaler - mIndir int8 // number of indirections to get to binaryMarshaler type - unmIndir int8 // number of indirections to get to binaryUnmarshaler type - toArray bool // whether this (struct) type should be encoded as an array -} - -func (ti *typeInfo) indexForEncName(name string) int { - //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - } - return -1 -} - -func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - cachedTypeInfoMutex.RLock() - pti, ok = cachedTypeInfo[rtid] - cachedTypeInfoMutex.RUnlock() - if ok { - return - } - - cachedTypeInfoMutex.Lock() - defer cachedTypeInfoMutex.Unlock() - if pti, ok = cachedTypeInfo[rtid]; ok { - return - } - - ti := typeInfo{rt: rt, rtid: rtid} - pti = &ti - - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.m, ti.mIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.unm, ti.unmIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } - - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) - ti.toArray = siInfo.toArray - } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) - - // // try to put all si close together - // const tryToPutAllStructFieldInfoTogether = true - // if tryToPutAllStructFieldInfoTogether { - // sfip2 := make([]structFieldInfo, len(sfip)) - // for i, si := range sfip { - // sfip2[i] = *si - // } - // for i := range sfip { - // sfip[i] = &sfip2[i] - // } - // } - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - copy(ti.sfi, sfip) - } - // sfi = sfip - cachedTypeInfo[rtid] = pti - return -} - -func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, -) { - // for rt.Kind() == reflect.Ptr { - // // indexstack = append(indexstack, 0) - // rt = rt.Elem() - // } - for j := 0; j < rt.NumField(); j++ { - 
f := rt.Field(j) - stag := f.Tag.Get(structTagName) - if stag == "-" { - continue - } - if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { - continue - } - // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. - if f.Anonymous && stag == "" { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) - continue - } - } - // do not let fields with same name in embedded structs override field at higher level. - // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } - si := parseStructFieldInfo(f.Name, stag) - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true - } - } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" - } -} - -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } -} - -func doPanic(tag string, format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = tag - copy(params2[1:], params) - panic(fmt.Errorf("%s: "+format, params2...)) -} - -func checkOverflowFloat32(f float64, doCheck bool) { - if !doCheck { - return - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() - f2 := f - if f2 < 0 { - f2 = -f - } - if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { - decErr("Overflow float32 value: %v", f2) - } -} - -func checkOverflow(ui uint64, i int64, bitsize uint8) { - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize == 0 { - return - } - if i != 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) - } - } - if ui != 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) - } - } -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go deleted file mode 100644 index 58417da958..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). 
- -import ( - "errors" - "fmt" - "math" - "reflect" -) - -var ( - raisePanicAfterRecover = false - debugging = true -) - -func panicValToErr(panicVal interface{}, err *error) { - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - if raisePanicAfterRecover { - panic(panicVal) - } - return -} - -func isEmptyValueDeref(v reflect.Value, deref bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return isEmptyValueDeref(v.Elem(), deref) - } else { - return v.IsNil() - } - case reflect.Struct: - // return true if all fields are empty. else return false. - - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. - // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !isEmptyValueDeref(v.Field(i), deref) { - return false - } - } - return true - } - return false -} - -func isEmptyValue(v reflect.Value) bool { - return isEmptyValueDeref(v, true) -} - -func debugf(format string, args ...interface{}) { - if debugging { - if len(format) == 0 || format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Printf(format, args...) - } -} - -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go deleted file mode 100644 index da0500d192..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -MSGPACK - -Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. -We need to maintain compatibility with it and how it encodes integer values -without caring about the type. 
- -For compatibility with behaviour of msgpack-c reference implementation: - - Go intX (>0) and uintX - IS ENCODED AS - msgpack +ve fixnum, unsigned - - Go intX (<0) - IS ENCODED AS - msgpack -ve fixnum, signed - -*/ -package codec - -import ( - "fmt" - "io" - "math" - "net/rpc" -) - -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 - - // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff -) - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). -type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. -type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} -) - -//--------------------------------------------- - -type msgpackEncDriver struct { - w encWriter - h *MsgpackHandle -} - -func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} - -func (e *msgpackEncDriver) encodeNil() { - e.w.writen1(mpNil) -} - -func (e *msgpackEncDriver) encodeInt(i int64) { - - switch { - case i >= 0: - e.encodeUint(uint64(i)) - case i >= -32: - e.w.writen1(byte(i)) - case i >= math.MinInt8: - e.w.writen2(mpInt8, byte(i)) - case i >= math.MinInt16: - e.w.writen1(mpInt16) - e.w.writeUint16(uint16(i)) - case i >= math.MinInt32: - e.w.writen1(mpInt32) - e.w.writeUint32(uint32(i)) - default: - e.w.writen1(mpInt64) - e.w.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) encodeUint(i uint64) { - switch { - case i <= math.MaxInt8: - e.w.writen1(byte(i)) - case i <= math.MaxUint8: - e.w.writen2(mpUint8, byte(i)) - case i <= math.MaxUint16: - e.w.writen1(mpUint16) - e.w.writeUint16(uint16(i)) - case i <= math.MaxUint32: - e.w.writen1(mpUint32) - e.w.writeUint32(uint32(i)) - default: - e.w.writen1(mpUint64) - e.w.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(mpTrue) - } else { - e.w.writen1(mpFalse) - } -} - -func (e *msgpackEncDriver) encodeFloat32(f float32) { - e.w.writen1(mpFloat) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *msgpackEncDriver) encodeFloat64(f float64) { - e.w.writen1(mpDouble) - e.w.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - switch { - case l == 1: - e.w.writen2(mpFixExt1, xtag) - case l == 2: - e.w.writen2(mpFixExt2, xtag) - case l == 4: - e.w.writen2(mpFixExt4, xtag) - case l == 8: - e.w.writen2(mpFixExt8, xtag) - case l == 16: - e.w.writen2(mpFixExt16, xtag) - case l < 256: - e.w.writen2(mpExt8, byte(l)) - e.w.writen1(xtag) - case l < 65536: - e.w.writen1(mpExt16) - e.w.writeUint16(uint16(l)) - e.w.writen1(xtag) - default: - e.w.writen1(mpExt32) - e.w.writeUint32(uint32(l)) - e.w.writen1(xtag) - } -} - -func (e *msgpackEncDriver) encodeArrayPreamble(length int) { - e.writeContainerLen(msgpackContainerList, length) -} - -func (e *msgpackEncDriver) encodeMapPreamble(length int) { - e.writeContainerLen(msgpackContainerMap, length) -} - -func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) - } else { - e.writeContainerLen(msgpackContainerStr, len(s)) - } - if len(s) > 0 { - e.w.writestr(s) - } -} - -func (e *msgpackEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) -} - -func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) - } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) - } - if len(bs) > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - switch { - case ct.hasFixMin && l < ct.fixCutoff: - e.w.writen1(ct.bFixMin | byte(l)) - case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): - e.w.writen2(ct.b8, uint8(l)) - case l < 65536: - e.w.writen1(ct.b16) - e.w.writeUint16(uint16(l)) - default: - e.w.writen1(ct.b32) - e.w.writeUint32(uint32(l)) - } -} - -//--------------------------------------------- - -type msgpackDecDriver struct { - r decReader - h *MsgpackHandle - bd byte - bdRead bool - bdType valueType -} - -func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} - -// Note: This returns either a primitive (int, bool, etc) for non-containers, -// or a containerType, or a specific type denoting nil or extension. -// It is called when a nil interface{} is passed, leaving it up to the DecDriver -// to introspect the stream and decide how best to decode. -// It deciphers the value by looking at the stream first. -func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - bd := d.bd - - switch bd { - case mpNil: - vt = valueTypeNil - d.bdRead = false - case mpFalse: - vt = valueTypeBool - v = false - case mpTrue: - vt = valueTypeBool - v = true - - case mpFloat: - vt = valueTypeFloat - v = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - vt = valueTypeFloat - v = math.Float64frombits(d.r.readUint64()) - - case mpUint8: - vt = valueTypeUint - v = uint64(d.r.readn1()) - case mpUint16: - vt = valueTypeUint - v = uint64(d.r.readUint16()) - case mpUint32: - vt = valueTypeUint - v = uint64(d.r.readUint32()) - case mpUint64: - vt = valueTypeUint - v = uint64(d.r.readUint64()) - - case mpInt8: - vt = valueTypeInt - v = int64(int8(d.r.readn1())) - case mpInt16: - vt = valueTypeInt - v = int64(int16(d.r.readUint16())) - case mpInt32: - vt = valueTypeInt - v = int64(int32(d.r.readUint32())) - case mpInt64: - vt = valueTypeInt - v = int64(int64(d.r.readUint64())) - - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - // positive fixnum (always signed) - vt = valueTypeInt - v = int64(int8(bd)) - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - // negative fixnum - vt = valueTypeInt - v = int64(int8(bd)) - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - var rvm string - vt = valueTypeString - v = &rvm - } else { - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - } - decodeFurther = true - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - decodeFurther = true - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - vt = valueTypeArray - decodeFurther = true - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - vt = valueTypeMap - decodeFurther = true - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - clen := d.readExtLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(clen) - v = &re - vt = valueTypeExt - default: - decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - if !decodeFurther { - d.bdRead = false - } - return -} - -// int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { - switch d.bd { - case mpUint8: - i = int64(uint64(d.r.readn1())) - case mpUint16: - i = int64(uint64(d.r.readUint16())) - case mpUint32: - i = int64(uint64(d.r.readUint32())) - case mpUint64: - i = int64(d.r.readUint64()) - case mpInt8: - i = int64(int8(d.r.readn1())) - case mpInt16: - i = int64(int16(d.r.readUint16())) - case mpInt32: - i = int64(int32(d.r.readUint32())) - case mpInt64: - i = int64(d.r.readUint64()) - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - i = int64(int8(d.bd)) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - i = int64(int8(d.bd)) - default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", 
msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) - } - } - d.bdRead = false - return -} - -// uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { - switch d.bd { - case mpUint8: - ui = uint64(d.r.readn1()) - case mpUint16: - ui = uint64(d.r.readUint16()) - case mpUint32: - ui = uint64(d.r.readUint32()) - case mpUint64: - ui = d.r.readUint64() - case mpInt8: - if i := int64(int8(d.r.readn1())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt16: - if i := int64(int16(d.r.readUint16())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt32: - if i := int64(int32(d.r.readUint32())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt64: - if i := int64(d.r.readUint64()); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - ui = uint64(d.bd) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) - default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) - } - } - d.bdRead = false - return -} - -// float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case mpFloat: - f = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - f = math.Float64frombits(d.r.readUint64()) - default: - f = float64(d.decodeInt(0)) - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) decodeBool() (b bool) { - switch d.bd { - case mpFalse, 0: - // b = false - case mpTrue, 1: - b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) decodeString() (s string) { - clen := d.readContainerLen(msgpackContainerStr) - if clen > 0 { - s = string(d.r.readn(clen)) - } - d.bdRead = false - return -} - -// Callers must check if changed=true (to decide whether to replace the one they have) -func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - // bytes can be decoded from msgpackContainerStr or msgpackContainerBin - var clen int - switch d.bd { - case mpBin8, mpBin16, mpBin32: - clen = d.readContainerLen(msgpackContainerBin) - default: - clen = d.readContainerLen(msgpackContainerStr) - } - // if clen < 0 { - // changed = true - // panic("length cannot be zero. 
this cannot be nil.") - // } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - // Return changed=true if length of passed slice diff from length of bytes in stream - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. -func (d *msgpackDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *msgpackDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - bd := d.bd - switch bd { - case mpNil: - d.bdType = valueTypeNil - case mpFalse, mpTrue: - d.bdType = valueTypeBool - case mpFloat, mpDouble: - d.bdType = valueTypeFloat - case mpUint8, mpUint16, mpUint32, mpUint64: - d.bdType = valueTypeUint - case mpInt8, mpInt16, mpInt32, mpInt64: - d.bdType = valueTypeInt - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - d.bdType = valueTypeInt - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - d.bdType = valueTypeInt - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - d.bdType = valueTypeString - } else { - d.bdType = valueTypeBytes - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - d.bdType = valueTypeBytes - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - d.bdType = valueTypeArray - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - d.bdType = valueTypeMap - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - d.bdType = valueTypeExt - default: - decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - } - return d.bdType -} - -func (d *msgpackDecDriver) tryDecodeAsNil() bool { - if d.bd == mpNil { - d.bdRead = false - return true - } - return false -} - -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { - bd := d.bd - switch { - case bd == mpNil: - clen = -1 // to represent nil - case bd == ct.b8: - clen = int(d.r.readn1()) - case bd == ct.b16: - clen = int(d.r.readUint16()) - case bd == ct.b32: - clen = int(d.r.readUint32()) - case (ct.bFixMin & bd) == ct.bFixMin: - clen = int(ct.bFixMin ^ bd) - default: - decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) readMapLen() int { - return d.readContainerLen(msgpackContainerMap) -} - -func (d *msgpackDecDriver) readArrayLen() int { - return d.readContainerLen(msgpackContainerList) -} - -func (d *msgpackDecDriver) readExtLen() (clen int) { - switch d.bd { - case mpNil: - clen = -1 // to represent nil - case mpFixExt1: - clen = 1 - case mpFixExt2: - clen = 2 - case mpFixExt4: - clen = 4 - case mpFixExt8: - clen = 8 - case mpFixExt16: - clen = 16 - case mpExt8: - clen = int(d.r.readn1()) - case mpExt16: - clen = int(d.r.readUint16()) - case mpExt32: - clen = int(d.r.readUint32()) - default: - decErr("decoding ext bytes: found unexpected byte: %x", d.bd) - } - return -} - -func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - xbd := d.bd - switch { - case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: - xbs, _ = d.decodeBytes(nil) - case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, - xbd >= mpFixStrMin && xbd <= mpFixStrMax: - 
xbs = []byte(d.decodeString()) - default: - clen := d.readExtLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(clen) - } - d.bdRead = false - return -} - -//-------------------------------------------------- - -//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - BasicHandle - - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). - // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. - // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. - WriteExt bool -} - -func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { - return &msgpackEncDriver{w: w, h: h} -} - -func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { - return &msgpackDecDriver{r: r, h: h} -} - -func (h *MsgpackHandle) writeExt() bool { - return h.WriteExt -} - -func (h *MsgpackHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. - // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.cls { - return io.EOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. - - const fia byte = 0x94 //four item array descriptor value - // Not sure why the panic of EOF is swallowed above. - // if bs1 := c.dec.r.readn1(); bs1 != fia { - // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) - // return - // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return - } - - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var MsgpackSpecRpc msgpackSpecRpc - -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py deleted file mode 100755 index e933838c56..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python - -# This will create golden files in a directory passed to it. -# A Test calls this internally to create the golden files -# So it can process them (so we don't have to checkin the files). 
- -import msgpack, msgpackrpc, sys, os, threading - -def get_test_data_list(): - # get list with all primitive types, and a combo type - l0 = [ - -8, - -1616, - -32323232, - -6464646464646464, - 192, - 1616, - 32323232, - 6464646464646464, - 192, - -3232.0, - -6464646464.0, - 3232.0, - 6464646464.0, - False, - True, - None, - "someday", - "", - "bytestring", - 1328176922000002000, - -2206187877999998000, - 0, - -6795364578871345152 - ] - l1 = [ - { "true": True, - "false": False }, - { "true": "True", - "false": False, - "uint16(1616)": 1616 }, - { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], - "int32":32323232, "bool": True, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890" }, - { True: "true", 8: False, "false": 0 } - ] - - l = [] - l.extend(l0) - l.append(l0) - l.extend(l1) - return l - -def build_test_data(destdir): - l = get_test_data_list() - for i in range(len(l)): - packer = msgpack.Packer() - serialized = packer.pack(l[i]) - f = open(os.path.join(destdir, str(i) + '.golden'), 'wb') - f.write(serialized) - f.close() - -def doRpcServer(port, stopTimeSec): - class EchoHandler(object): - def Echo123(self, msg1, msg2, msg3): - return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) - def EchoStruct(self, msg): - return ("%s" % msg) - - addr = msgpackrpc.Address('localhost', port) - server = msgpackrpc.Server(EchoHandler()) - server.listen(addr) - # run thread to stop it after stopTimeSec seconds if > 0 - if stopTimeSec > 0: - def myStopRpcServer(): - server.stop() - t = threading.Timer(stopTimeSec, myStopRpcServer) - t.start() - server.start() - -def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doRpcClientToGoSvc(port): - # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doMain(args): - if len(args) == 2 and args[0] == "testdata": - build_test_data(args[1]) - elif len(args) == 3 and args[0] == "rpc-server": - doRpcServer(int(args[1]), int(args[2])) - elif len(args) == 2 and args[0] == "rpc-client-python-service": - doRpcClientToPythonSvc(int(args[1])) - elif len(args) == 2 and args[0] == "rpc-client-go-service": - doRpcClientToGoSvc(int(args[1])) - else: - print("Usage: msgpack_test.py " + - "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") - -if __name__ == "__main__": - doMain(sys.argv[1:]) - diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go deleted file mode 100644 index d014dbdcc7..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "bufio" - "io" - "net/rpc" - "sync" -) - -// Rpc provides a rpc Server or Client Codec for rpc communication. 
-type Rpc interface { - ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec - ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec -} - -// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accomodates use-cases where the connection -// should be used by rpc and non-rpc functions, e.g. streaming a file after -// sending an rpc response. -type RpcCodecBuffered interface { - BufferedReader() *bufio.Reader - BufferedWriter() *bufio.Writer -} - -// ------------------------------------- - -// rpcCodec defines the struct members and common methods. -type rpcCodec struct { - rwc io.ReadWriteCloser - dec *Decoder - enc *Encoder - bw *bufio.Writer - br *bufio.Reader - mu sync.Mutex - cls bool -} - -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - bw := bufio.NewWriter(conn) - br := bufio.NewReader(conn) - return rpcCodec{ - rwc: conn, - bw: bw, - br: br, - enc: NewEncoder(bw, h), - dec: NewDecoder(br, h), - } -} - -func (c *rpcCodec) BufferedReader() *bufio.Reader { - return c.br -} - -func (c *rpcCodec) BufferedWriter() *bufio.Writer { - return c.bw -} - -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { - if c.cls { - return io.EOF - } - if err = c.enc.Encode(obj1); err != nil { - return - } - if writeObj2 { - if err = c.enc.Encode(obj2); err != nil { - return - } - } - if doFlush && c.bw != nil { - return c.bw.Flush() - } - return -} - -func (c *rpcCodec) read(obj interface{}) (err error) { - if c.cls { - return io.EOF - } - //If nil is passed in, we should still attempt to read content to nowhere. - if obj == nil { - var obj2 interface{} - return c.dec.Decode(&obj2) - } - return c.dec.Decode(obj) -} - -func (c *rpcCodec) Close() error { - if c.cls { - return io.EOF - } - c.cls = true - return c.rwc.Close() -} - -func (c *rpcCodec) ReadResponseBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -type goRpcCodec struct { - rpcCodec -} - -func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // Must protect for concurrent access as per API - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -// goRpc is the implementation of Rpc that uses the communication protocol -// as defined in net/rpc package. -type goRpc struct{} - -// GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. 
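For context, this is roughly how the GoRpc value declared below was plugged into the standard net/rpc machinery. A hedged sketch: the service name "Arith.Multiply" and the net.Pipe transport are assumptions for illustration, not taken from the removed files.

package main

import (
	"net"
	"net/rpc"

	"github.com/hashicorp/go-msgpack/codec"
)

// serve answers net/rpc requests on conn using the msgpack codec.
func serve(conn net.Conn, h codec.Handle) {
	rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, h))
}

// call issues one request over conn using the same codec.
func call(conn net.Conn, h codec.Handle) error {
	client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, h))
	defer client.Close()
	var reply int
	// "Arith.Multiply" is a hypothetical method registered elsewhere
	// with rpc.Register; it is not part of this package.
	return client.Call("Arith.Multiply", [2]int{3, 4}, &reply)
}

func main() {
	h := new(codec.MsgpackHandle)
	c, s := net.Pipe()
	go serve(s, h)
	// With no service registered, this returns an rpc error, which is
	// enough to show the request/response round trip over the codec.
	_ = call(c, h)
}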
-var GoRpc goRpc - -func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go deleted file mode 100644 index 9e4d148a2a..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import "math" - -const ( - _ uint8 = iota - simpleVdNil = 1 - simpleVdFalse = 2 - simpleVdTrue = 3 - simpleVdFloat32 = 4 - simpleVdFloat64 = 5 - - // each lasts for 4 (ie n, n+1, n+2, n+3) - simpleVdPosInt = 8 - simpleVdNegInt = 12 - - // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) - simpleVdString = 216 - simpleVdByteArray = 224 - simpleVdArray = 232 - simpleVdMap = 240 - simpleVdExt = 248 -) - -type simpleEncDriver struct { - h *SimpleHandle - w encWriter - //b [8]byte -} - -func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { - return false -} - -func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { -} - -func (e *simpleEncDriver) encodeNil() { - e.w.writen1(simpleVdNil) -} - -func (e *simpleEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(simpleVdTrue) - } else { - e.w.writen1(simpleVdFalse) - } -} - -func (e *simpleEncDriver) encodeFloat32(f float32) { - e.w.writen1(simpleVdFloat32) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *simpleEncDriver) encodeFloat64(f float64) { - e.w.writen1(simpleVdFloat64) - e.w.writeUint64(math.Float64bits(f)) -} - -func (e *simpleEncDriver) encodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-v), simpleVdNegInt) - } else { - e.encUint(uint64(v), simpleVdPosInt) - } -} - -func (e *simpleEncDriver) encodeUint(v uint64) { - e.encUint(v, simpleVdPosInt) -} - -func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - switch { - case v <= math.MaxUint8: - e.w.writen2(bd, uint8(v)) - case v <= math.MaxUint16: - e.w.writen1(bd + 1) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.w.writen1(bd + 2) - e.w.writeUint32(uint32(v)) - case v <= math.MaxUint64: - e.w.writen1(bd + 3) - e.w.writeUint64(v) - } -} - -func (e *simpleEncDriver) encLen(bd byte, length int) { - switch { - case length == 0: - e.w.writen1(bd) - case length <= math.MaxUint8: - e.w.writen1(bd + 1) - e.w.writen1(uint8(length)) - case length <= math.MaxUint16: - e.w.writen1(bd + 2) - e.w.writeUint16(uint16(length)) - case int64(length) <= math.MaxUint32: - e.w.writen1(bd + 3) - e.w.writeUint32(uint32(length)) - default: - e.w.writen1(bd + 4) - e.w.writeUint64(uint64(length)) - } -} - -func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(simpleVdExt, length) - e.w.writen1(xtag) -} - -func (e *simpleEncDriver) encodeArrayPreamble(length int) { - e.encLen(simpleVdArray, length) -} - -func (e *simpleEncDriver) encodeMapPreamble(length int) { - e.encLen(simpleVdMap, length) -} - -func (e *simpleEncDriver) encodeString(c charEncoding, v string) { - e.encLen(simpleVdString, len(v)) - e.w.writestr(v) -} - -func (e *simpleEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) -} - -func (e *simpleEncDriver) 
encodeStringBytes(c charEncoding, v []byte) { - e.encLen(simpleVdByteArray, len(v)) - e.w.writeb(v) -} - -//------------------------------------ - -type simpleDecDriver struct { - h *SimpleHandle - r decReader - bdRead bool - bdType valueType - bd byte - //b [8]byte -} - -func (d *simpleDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *simpleDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.bd { - case simpleVdNil: - d.bdType = valueTypeNil - case simpleVdTrue, simpleVdFalse: - d.bdType = valueTypeBool - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - d.bdType = valueTypeUint - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - d.bdType = valueTypeInt - case simpleVdFloat32, simpleVdFloat64: - d.bdType = valueTypeFloat - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - d.bdType = valueTypeString - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - d.bdType = valueTypeBytes - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - d.bdType = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - d.bdType = valueTypeArray - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) - } - } - return d.bdType -} - -func (d *simpleDecDriver) tryDecodeAsNil() bool { - if d.bd == simpleVdNil { - d.bdRead = false - return true - } - return false -} - -func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { - return false -} - -func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { -} - -func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.bd { - case simpleVdPosInt: - ui = uint64(d.r.readn1()) - i = int64(ui) - case simpleVdPosInt + 1: - ui = uint64(d.r.readUint16()) - i = int64(ui) - case simpleVdPosInt + 2: - ui = uint64(d.r.readUint32()) - i = int64(ui) - case simpleVdPosInt + 3: - ui = uint64(d.r.readUint64()) - i = int64(ui) - case simpleVdNegInt: - ui = uint64(d.r.readn1()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 1: - ui = uint64(d.r.readUint16()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 2: - ui = uint64(d.r.readUint32()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 3: - ui = uint64(d.r.readUint64()) - i = -(int64(ui)) - neg = true - default: - decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) - } - // don't do this check, because callers may only want the unsigned value. 
- // if ui > math.MaxInt64 { - // decErr("decIntAny: Integer out of range for signed int64: %v", ui) - // } - return -} - -func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() - if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - checkOverflow(ui, 0, bitsize) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case simpleVdFloat32: - f = float64(math.Float32frombits(d.r.readUint32())) - case simpleVdFloat64: - f = math.Float64frombits(d.r.readUint64()) - default: - if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - _, i, _ := d.decIntAny() - f = float64(i) - } else { - decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) - } - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *simpleDecDriver) decodeBool() (b bool) { - switch d.bd { - case simpleVdTrue: - b = true - case simpleVdFalse: - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) readMapLen() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) readArrayLen() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) decLen() int { - switch d.bd % 8 { - case 0: - return 0 - case 1: - return int(d.r.readn1()) - case 2: - return int(d.r.readUint16()) - case 3: - ui := uint64(d.r.readUint32()) - checkOverflow(ui, 0, intBitsize) - return int(ui) - case 4: - ui := d.r.readUint64() - checkOverflow(ui, 0, intBitsize) - return int(ui) - } - decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) - return -1 -} - -func (d *simpleDecDriver) decodeString() (s string) { - s = string(d.r.readn(d.decLen())) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - if clen := d.decLen(); clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.bd { - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - - switch d.bd { - case simpleVdNil: - vt = valueTypeNil - case simpleVdFalse: - vt = valueTypeBool - v = false - case simpleVdTrue: - vt = valueTypeBool - v = true - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - vt = valueTypeUint - ui, _, _ := d.decIntAny() - v = ui - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - vt = valueTypeInt - _, i, _ := d.decIntAny() - v = i - case simpleVdFloat32: - vt = valueTypeFloat - v = d.decodeFloat(true) - case simpleVdFloat64: - vt = valueTypeFloat - v = d.decodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - vt = valueTypeString - v = d.decodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - vt = valueTypeExt - l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - vt = valueTypeArray - decodeFurther = true - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - vt = valueTypeMap - decodeFurther = true - default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -// SimpleHandle is a Handle for a very simple encoding format. -// -// simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceeded by the descriptor byte (bd) -// - True, false, nil are encoded fully in 1 byte (the descriptor) -// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). -// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. -// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) -// are encoded in 0, 1, 2, 4 or 8 bytes. -// Zero-length containers have no length encoded. -// For others, the number of bytes is given by pow(2, bd%3) -// - maps are encoded as [bd] [length] [[key][value]]... -// - arrays are encoded as [bd] [length] [value]... -// - extensions are encoded as [bd] [length] [tag] [byte]... -// - strings/bytearrays are encoded as [bd] [length] [byte]... -// -// The full spec will be published soon. 
-type SimpleHandle struct { - BasicHandle -} - -func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { - return &simpleEncDriver{w: w, h: h} -} - -func (h *SimpleHandle) newDecDriver(r decReader) decDriver { - return &simpleDecDriver{r: r, h: h} -} - -func (_ *SimpleHandle) writeExt() bool { - return true -} - -func (h *SimpleHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} - -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go deleted file mode 100644 index c86d65328d..0000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/time.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "time" -) - -var ( - timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} -) - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. -// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). -// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. 
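The descriptor layout above reduces to a handful of bit tests. A small sketch, mirroring the masks used by decodeTime below (the function and example byte are illustrative, not part of the patch):

package main

import "fmt"

// describeTimeDescriptor unpacks the timestamp descriptor byte laid out above.
// hasSecs/hasNsecs/hasTZ correspond to the A/B/C flags; the byte counts come
// from the DDD and EE fields.
func describeTimeDescriptor(bd byte) (hasSecs, hasNsecs, hasTZ bool, secsBytes, nsecsBytes int) {
	hasSecs = bd&(1<<7) != 0  // A: secs component present
	hasNsecs = bd&(1<<6) != 0 // B: nsecs component present
	hasTZ = bd&(1<<5) != 0    // C: tz component present
	if hasSecs {
		secsBytes = int((bd>>2)&0x7) + 1 // DDD + 1 bytes of secs
	}
	if hasNsecs {
		nsecsBytes = int(bd&0x3) + 1 // EE + 1 bytes of nsecs
	}
	return
}

func main() {
	// 0xA6 = 1010 0110: A=1, B=0, C=1, DDD=001 (2 secs bytes), EE ignored since B=0.
	fmt.Println(describeTimeDescriptor(0xA6)) // true false true 2 0
}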
-// -func encodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. 
this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index 2ea0827329..89b1422d1d 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -40,11 +40,11 @@ func (e *Error) GoString() string { } // WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementatin of the errwrap.Wrapper interface so that +// It is an implementation of the errwrap.Wrapper interface so that // multierror.Error can be used with that library. // // This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implementd only to +// than accessing the Errors field directly. It is implemented only to // satisfy the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { return e.Errors diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 28c6355640..53ac65fcfd 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -23,6 +23,11 @@ interface as if it were going to run in the same process. For a plugin user: you just use and call functions on an interface as if it were in the same process. This plugin system handles the communication in between. +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + **Complex arguments and return values are supported.** This library provides APIs for handling complex arguments and return values such as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library @@ -68,10 +73,11 @@ must be properly secured to protect this configuration. ## Architecture The HashiCorp plugin system works by launching subprocesses and communicating -over RPC (using standard `net/rpc`). A single connection is made between -any plugin and the host process, and we use a -[connection multiplexing](https://github.com/hashicorp/yamux) -library to multiplex any other connections on top. +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. This architecture has a number of benefits: @@ -79,8 +85,8 @@ This architecture has a number of benefits: panic the plugin user. * Plugins are very easy to write: just write a Go application and `go build`. - Theoretically you could also use another language as long as it can - communicate the Go `net/rpc` protocol but this hasn't yet been tried. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. 
* Plugins are very easy to install: just put the binary in a location where the host will find it (depends on the host but this library also provides @@ -88,8 +94,8 @@ This architecture has a number of benefits: * Plugins can be relatively secure: The plugin only has access to the interfaces and args given to it, not to the entire memory space of the - process. More security features are planned (see the coming soon section - below). + process. Additionally, go-plugin can communicate with the plugin over + TLS. ## Usage @@ -100,10 +106,9 @@ high-level steps that must be done. Examples are available in the 1. Choose the interface(s) you want to expose for plugins. 2. For each interface, implement an implementation of that interface - that communicates over an `*rpc.Client` (from the standard `net/rpc` - package) for every function call. Likewise, implement the RPC server - struct this communicates to which is then communicating to a real, - concrete implementation. + that communicates over a `net/rpc` connection or other a + [gRPC](http://www.grpc.io) connection or both. You'll have to implement + both a client and server implementation. 3. Create a `Plugin` implementation that knows how to create the RPC client/server for a given plugin type. diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index b69d41b28d..6d2f9d8cc6 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -74,7 +74,8 @@ type Client struct { l sync.Mutex address net.Addr process *os.Process - client *RPCClient + client ClientProtocol + protocol Protocol } // ClientConfig is the configuration used to initialize a new @@ -135,14 +136,28 @@ type ClientConfig struct { // sync any of these streams. SyncStdout io.Writer SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. You must explicitly opt-in to + // new protocols. + AllowedProtocols []Protocol } // ReattachConfig is used to configure a client to reattach to an // already-running plugin process. You can retrieve this information by // calling ReattachConfig on Client. type ReattachConfig struct { - Addr net.Addr - Pid int + Protocol Protocol + Addr net.Addr + Pid int } // SecureConfig is used to configure a client to verify the integrity of an @@ -242,6 +257,10 @@ func NewClient(config *ClientConfig) (c *Client) { config.SyncStderr = ioutil.Discard } + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + c = &Client{config: config} if config.Managed { managedClientsLock.Lock() @@ -252,11 +271,11 @@ func NewClient(config *ClientConfig) (c *Client) { return } -// Client returns an RPC client for the plugin. +// Client returns the protocol client for this connection. // -// Subsequent calls to this will return the same RPC client. -func (c *Client) Client() (*RPCClient, error) { - addr, err := c.Start() +// Subsequent calls to this will return the same client. 
+func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() if err != nil { return nil, err } @@ -268,33 +287,18 @@ func (c *Client) Client() (*RPCClient, error) { return c.client, nil } - // Connect to the client - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) } - if c.config.TLSConfig != nil { - conn = tls.Client(conn, c.config.TLSConfig) - } - - // Create the actual RPC client - c.client, err = NewRPCClient(conn, c.config.Plugins) if err != nil { - conn.Close() - return nil, err - } - - // Begin the stream syncing so that stdin, out, err work properly - err = c.client.SyncStreams( - c.config.SyncStdout, - c.config.SyncStderr) - if err != nil { - c.client.Close() c.client = nil return nil, err } @@ -441,6 +445,11 @@ func (c *Client) Start() (addr net.Addr, err error) { // Set the address and process c.address = c.config.Reattach.Addr c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } return c.address, nil } @@ -560,7 +569,7 @@ func (c *Client) Start() (addr net.Addr, err error) { // Trim the line and split by "|" in order to get the parts of // the output. line := strings.TrimSpace(string(lineBytes)) - parts := strings.SplitN(line, "|", 4) + parts := strings.SplitN(line, "|", 6) if len(parts) < 4 { err = fmt.Errorf( "Unrecognized remote plugin message: %s\n\n"+ @@ -610,6 +619,27 @@ func (c *Client) Start() (addr net.Addr, err error) { default: err = fmt.Errorf("Unknown address type: %s", parts[3]) } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return + } + } c.address = addr @@ -640,11 +670,48 @@ func (c *Client) ReattachConfig() *ReattachConfig { } return &ReattachConfig{ - Addr: c.address, - Pid: c.config.Cmd.Process.Pid, + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, } } +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are surpressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. +func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. 
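Putting the client-side changes above together: a host that wants to accept gRPC plugins opts in via AllowedProtocols, and Client() now returns the protocol-appropriate ClientProtocol (an RPCClient or a GRPCClient). A minimal sketch; the handshake config, plugin map, binary path, and plugin name are placeholders supplied by the host application, not part of this patch:

package main

import (
	"log"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// handshakeConfig and pluginMap are placeholders for the host's own
	// handshake and plugin implementations.
	var handshakeConfig plugin.HandshakeConfig
	var pluginMap map[string]plugin.Plugin

	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshakeConfig,
		Plugins:         pluginMap,
		Cmd:             exec.Command("./my-plugin"),
		// Opt in to gRPC plugins; net/rpc stays allowed for older plugins.
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolNetRPC, plugin.ProtocolGRPC},
	})
	defer client.Kill()

	// Client() starts the plugin process if needed and returns the
	// ClientProtocol for whichever protocol the plugin announced.
	rpcClient, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}

	raw, err := rpcClient.Dispense("my-plugin")
	if err != nil {
		log.Fatal(err)
	}
	_ = raw // assert to your plugin's interface here
}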
+func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + func (c *Client) logStderr(r io.Reader) { bufR := bufio.NewReader(r) for { diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 0000000000..3bcf95efc5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,83 @@ +package plugin + +import ( + "fmt" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(c *Client) (*GRPCClient, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets + opts = append(opts, grpc.WithDialer(c.dialer)) + + // go-plugin expects to block the connection + opts = append(opts, grpc.WithBlock()) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if c.config.TLSConfig == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(c.config.TLSConfig))) + } + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + }, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.Conn) +} + +// ClientProtocol impl. 
+func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 0000000000..177a0cdd7d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,115 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server +} + +// ServerProtocol impl. +func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register all our plugins onto the gRPC server. + for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatibile plugin", k) + } + + if err := p.GRPCServer(s.server); err != nil { + return fmt.Errorf("error registring %q: %s", k, err) + } + } + + return nil +} + +// Config is the GRPCServerConfig encoded as JSON then base64. +func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since ths shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. 
+ panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + // Start serving in a goroutine + go s.server.Serve(lis) + + // Wait until graceful completion + <-s.DoneCh +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. +type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go index 37c8fd653f..6b7bdd1cfd 100644 --- a/vendor/github.com/hashicorp/go-plugin/plugin.go +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -9,7 +9,10 @@ package plugin import ( + "errors" "net/rpc" + + "google.golang.org/grpc" ) // Plugin is the interface that is implemented to serve/connect to an @@ -23,3 +26,31 @@ type Plugin interface { // serving that communicates to the server end of the plugin. Client(*MuxBroker, *rpc.Client) (interface{}, error) } + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. + GRPCClient(*grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. +type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 0000000000..0cfc19e52d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. 
+ Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go index 31038c55e0..f30a4b1d38 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -1,6 +1,7 @@ package plugin import ( + "crypto/tls" "fmt" "io" "net" @@ -19,6 +20,42 @@ type RPCClient struct { stdout, stderr net.Conn } +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + // NewRPCClient creates a client from an already-open connection-like value. // Dial is typically used instead. func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 3e59373a53..5bb18dd5db 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -34,10 +34,14 @@ type RPCServer struct { lock sync.Mutex } -// Accept accepts connections on a listener and serves requests for -// each incoming connection. Accept blocks; the caller typically invokes -// it in a go statement. -func (s *RPCServer) Accept(lis net.Listener) { +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { for { conn, err := lis.Accept() if err != nil { diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 782a4e119d..781d01110b 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -2,6 +2,7 @@ package plugin import ( "crypto/tls" + "encoding/base64" "errors" "fmt" "io/ioutil" @@ -12,6 +13,8 @@ import ( "runtime" "strconv" "sync/atomic" + + "google.golang.org/grpc" ) // CoreProtocolVersion is the ProtocolVersion of the plugin system itself. @@ -46,11 +49,31 @@ type ServeConfig struct { // HandshakeConfig is the configuration that must match clients. HandshakeConfig + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + // Plugins are the plugins that are served. Plugins map[string]Plugin - // TLSProvider is a function that returns a configured tls.Config. - TLSProvider func() (*tls.Config, error) + // GRPCServer shoudl be non-nil to enable serving the plugins over + // gRPC. 
This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server +} + +// Protocol returns the protocol that this server should speak. +func (c *ServeConfig) Protocol() Protocol { + result := ProtocolNetRPC + if c.GRPCServer != nil { + result = ProtocolGRPC + } + + return result } // Serve serves the plugins given by ServeConfig. @@ -101,35 +124,82 @@ func Serve(opts *ServeConfig) { return } + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config if opts.TLSProvider != nil { - tlsConfig, err := opts.TLSProvider() + tlsConfig, err = opts.TLSProvider() if err != nil { log.Printf("[ERR] plugin: plugin tls init: %s", err) return } - listener = tls.NewListener(listener, tlsConfig) } - defer listener.Close() // Create the channel to tell us when we're done doneCh := make(chan struct{}) - // Create the RPC server to dispense - server := &RPCServer{ - Plugins: opts.Plugins, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, + // Build the server type + var server ServerProtocol + switch opts.Protocol() { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. + if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: opts.Plugins, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: opts.Plugins, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + default: + panic("unknown server protocol: " + opts.Protocol()) + } + + // Initialize the servers + if err := server.Init(); err != nil { + log.Printf("[ERR] plugin: protocol init: %s", err) + return + } + + // Build the extra configuration + extra := "" + if v := server.Config(); v != "" { + extra = base64.StdEncoding.EncodeToString([]byte(v)) + } + if extra != "" { + extra = "|" + extra } // Output the address and service name to stdout so that core can bring it up. 
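On the serving side, a plugin binary enables gRPC simply by populating GRPCServer; ServeConfig.Protocol() then reports ProtocolGRPC, and the handshake line printed just below gains the protocol field plus the optional base64 extra-config field, matching the SplitN(line, "|", 6) on the client side. A minimal sketch; handshakeConfig and pluginMap are placeholders for the plugin author's own values:

package main

import (
	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// handshakeConfig and pluginMap are placeholders; the plugins in the map
	// must implement GRPCPlugin for gRPC serving.
	var handshakeConfig plugin.HandshakeConfig
	var pluginMap map[string]plugin.Plugin

	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshakeConfig,
		Plugins:         pluginMap,
		// Setting GRPCServer switches ServeConfig.Protocol() to ProtocolGRPC.
		// DefaultGRPCServer builds a plain *grpc.Server from the options;
		// TLS credentials are appended automatically when TLSProvider is set.
		GRPCServer: plugin.DefaultGRPCServer,
	})
	// Serve blocks; on startup it writes a line of the form
	//   CoreProtocolVersion|AppProtocolVersion|network|address|protocol|extra
	// where "extra" is the base64-encoded GRPCServer.Config() JSON, if any.
}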
log.Printf("[DEBUG] plugin: plugin address: %s %s\n", listener.Addr().Network(), listener.Addr().String()) - fmt.Printf("%d|%d|%s|%s\n", + fmt.Printf("%d|%d|%s|%s|%s%s\n", CoreProtocolVersion, opts.ProtocolVersion, listener.Addr().Network(), - listener.Addr().String()) + listener.Addr().String(), + opts.Protocol(), + extra) os.Stdout.Sync() // Eat the interrupts @@ -150,10 +220,8 @@ func Serve(opts *ServeConfig) { os.Stdout = stdout_w os.Stderr = stderr_w - // Serve - go server.Accept(listener) - - // Wait for the graceful exit + // Accept connections and wait for completion + go server.Serve(listener) <-doneCh } diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index 9086a1b45f..c58e54d120 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -5,6 +5,8 @@ import ( "net" "net/rpc" "testing" + + "google.golang.org/grpc" ) // The testing file contains test helpers that you can use outside of @@ -74,3 +76,45 @@ func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServ return client, server } + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t *testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/README.md b/vendor/github.com/hashicorp/net-rpc-msgpackrpc/README.md deleted file mode 100644 index d24920c2de..0000000000 --- a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# net-rpc-msgpackrpc - -This library provides the same functions as `net/rpc/jsonrpc` but for -communicating with [MessagePack](http://msgpack.org/) instead. The library -is modeled directly after the Go standard library so it should be easy to -use and obvious. - -See the [GoDoc](http://godoc.org/github.com/hashicorp/net-rpc-msgpackrpc) for -API documentation. diff --git a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/client.go b/vendor/github.com/hashicorp/net-rpc-msgpackrpc/client.go deleted file mode 100644 index b02a55eca7..0000000000 --- a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/client.go +++ /dev/null @@ -1,43 +0,0 @@ -package msgpackrpc - -import ( - "errors" - "net/rpc" - "sync/atomic" - - "github.com/hashicorp/go-multierror" -) - -var ( - // nextCallSeq is used to assign a unique sequence number - // to each call made with CallWithCodec - nextCallSeq uint64 -) - -// CallWithCodec is used to perform the same actions as rpc.Client.Call but -// in a much cheaper way. It assumes the underlying connection is not being -// shared with multiple concurrent RPCs. The request/response must be syncronous. 
-func CallWithCodec(cc rpc.ClientCodec, method string, args interface{}, resp interface{}) error { - request := rpc.Request{ - Seq: atomic.AddUint64(&nextCallSeq, 1), - ServiceMethod: method, - } - if err := cc.WriteRequest(&request, args); err != nil { - return err - } - var response rpc.Response - if err := cc.ReadResponseHeader(&response); err != nil { - return err - } - if response.Error != "" { - err := errors.New(response.Error) - if readErr := cc.ReadResponseBody(nil); readErr != nil { - err = multierror.Append(err, readErr) - } - return rpc.ServerError(err.Error()) - } - if err := cc.ReadResponseBody(resp); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/codec.go b/vendor/github.com/hashicorp/net-rpc-msgpackrpc/codec.go deleted file mode 100644 index 6e73320541..0000000000 --- a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/codec.go +++ /dev/null @@ -1,122 +0,0 @@ -package msgpackrpc - -import ( - "bufio" - "io" - "net/rpc" - "sync" - - "github.com/hashicorp/go-msgpack/codec" -) - -var ( - // msgpackHandle is shared handle for decoding - msgpackHandle = &codec.MsgpackHandle{} -) - -// MsgpackCodec implements the rpc.ClientCodec and rpc.ServerCodec -// using the msgpack encoding -type MsgpackCodec struct { - closed bool - conn io.ReadWriteCloser - bufR *bufio.Reader - bufW *bufio.Writer - enc *codec.Encoder - dec *codec.Decoder - writeLock sync.Mutex -} - -// NewCodec returns a MsgpackCodec that can be used as either a Client or Server -// rpc Codec using a default handle. It also provides controls for enabling and -// disabling buffering for both reads and writes. -func NewCodec(bufReads, bufWrites bool, conn io.ReadWriteCloser) *MsgpackCodec { - return NewCodecFromHandle(bufReads, bufWrites, conn, msgpackHandle) -} - -// NewCodecFromHandle returns a MsgpackCodec that can be used as either a Client -// or Server rpc Codec using the passed handle. It also provides controls for -// enabling and disabling buffering for both reads and writes. 
-func NewCodecFromHandle(bufReads, bufWrites bool, conn io.ReadWriteCloser, - h *codec.MsgpackHandle) *MsgpackCodec { - cc := &MsgpackCodec{ - conn: conn, - } - if bufReads { - cc.bufR = bufio.NewReader(conn) - cc.dec = codec.NewDecoder(cc.bufR, h) - } else { - cc.dec = codec.NewDecoder(cc.conn, h) - } - if bufWrites { - cc.bufW = bufio.NewWriter(conn) - cc.enc = codec.NewEncoder(cc.bufW, h) - } else { - cc.enc = codec.NewEncoder(cc.conn, h) - } - return cc -} - -func (cc *MsgpackCodec) ReadRequestHeader(r *rpc.Request) error { - return cc.read(r) -} - -func (cc *MsgpackCodec) ReadRequestBody(out interface{}) error { - return cc.read(out) -} - -func (cc *MsgpackCodec) WriteResponse(r *rpc.Response, body interface{}) error { - cc.writeLock.Lock() - defer cc.writeLock.Unlock() - return cc.write(r, body) -} - -func (cc *MsgpackCodec) ReadResponseHeader(r *rpc.Response) error { - return cc.read(r) -} - -func (cc *MsgpackCodec) ReadResponseBody(out interface{}) error { - return cc.read(out) -} - -func (cc *MsgpackCodec) WriteRequest(r *rpc.Request, body interface{}) error { - cc.writeLock.Lock() - defer cc.writeLock.Unlock() - return cc.write(r, body) -} - -func (cc *MsgpackCodec) Close() error { - if cc.closed { - return nil - } - cc.closed = true - return cc.conn.Close() -} - -func (cc *MsgpackCodec) write(obj1, obj2 interface{}) (err error) { - if cc.closed { - return io.EOF - } - if err = cc.enc.Encode(obj1); err != nil { - return - } - if err = cc.enc.Encode(obj2); err != nil { - return - } - if cc.bufW != nil { - return cc.bufW.Flush() - } - return -} - -func (cc *MsgpackCodec) read(obj interface{}) (err error) { - if cc.closed { - return io.EOF - } - - // If nil is passed in, we should still attempt to read content to nowhere. - if obj == nil { - var obj2 interface{} - return cc.dec.Decode(&obj2) - } - return cc.dec.Decode(obj) -} diff --git a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/msgpackrpc.go b/vendor/github.com/hashicorp/net-rpc-msgpackrpc/msgpackrpc.go deleted file mode 100644 index 11e5465fd5..0000000000 --- a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/msgpackrpc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package msgpackrpc implements a MessagePack-RPC ClientCodec and ServerCodec -// for the rpc package, using the same API as the Go standard library -// for jsonrpc. -package msgpackrpc - -import ( - "io" - "net" - "net/rpc" -) - -// Dial connects to a MessagePack-RPC server at the specified network address. -func Dial(network, address string) (*rpc.Client, error) { - conn, err := net.Dial(network, address) - if err != nil { - return nil, err - } - return NewClient(conn), err -} - -// NewClient returns a new rpc.Client to handle requests to the set of -// services at the other end of the connection. -func NewClient(conn io.ReadWriteCloser) *rpc.Client { - return rpc.NewClientWithCodec(NewClientCodec(conn)) -} - -// NewClientCodec returns a new rpc.ClientCodec using MessagePack-RPC on conn. -func NewClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec { - return NewCodec(true, true, conn) -} - -// NewServerCodec returns a new rpc.ServerCodec using MessagePack-RPC on conn. -func NewServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec { - return NewCodec(true, true, conn) -} - -// ServeConn runs the MessagePack-RPC server on a single connection. ServeConn -// blocks, serving the connection until the client hangs up. The caller -// typically invokes ServeConn in a go statement. 
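For reference, the removed msgpackrpc package was used in the usual net/rpc style: serve a connection with ServeConn, dial back with Dial or NewClient, then Call as with jsonrpc. A minimal sketch; the service name, method, and argument/reply types are placeholders, since nothing is registered here:

package main

import (
	"log"
	"net"

	"github.com/hashicorp/net-rpc-msgpackrpc"
)

func main() {
	// Server side: accept one connection and serve it with the MessagePack codec.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		msgpackrpc.ServeConn(conn) // blocks until the client hangs up
	}()

	// Client side: Dial speaks MessagePack-RPC over TCP.
	client, err := msgpackrpc.Dial("tcp", ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Call follows the standard net/rpc convention; "Service.Method" and the
	// argument/reply types are placeholders for whatever the server registers.
	var reply string
	_ = client.Call("Service.Method", "args", &reply)
}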
-func ServeConn(conn io.ReadWriteCloser) { - rpc.ServeCodec(NewServerCodec(conn)) -} diff --git a/vendor/github.com/hashicorp/scada-client/LICENSE b/vendor/github.com/hashicorp/scada-client/LICENSE deleted file mode 100644 index e87a115e46..0000000000 --- a/vendor/github.com/hashicorp/scada-client/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/scada-client/README.md b/vendor/github.com/hashicorp/scada-client/README.md deleted file mode 100644 index 301a8ec592..0000000000 --- a/vendor/github.com/hashicorp/scada-client/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# SCADA Client - -This library provides a Golang client for the [HashiCorp SCADA service](http://scada.hashicorp.com). -SCADA stands for Supervisory Control And Data Acquisition, and as the name implies it allows -[Atlas](https://atlas.hashicorp.com) to provide control functions and request data from the tools that integrate. - -The technical details about how SCADA works are fairly simple. Clients first open a connection to -the SCADA service at scada.hashicorp.com on port 7223. This connection is secured by TLS, allowing -clients to verify the identity of the servers and to encrypt all communications. Once connected, a -handshake is performed where a client provides it's Atlas API credentials so that Atlas can verify -the client identity. Once complete, clients keep the connection open in an idle state waiting for -commands to be received. Commands map to APIs exposed by the product, and are subject to any ACLs, -authentication or authorization mechanisms of the client. - -This library is used in various HashiCorp products to integrate with the SCADA system. 
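A minimal sketch of that flow, using the removed package's own Dial/RPC API and the HandshakeRequest/HandshakeResponse types that appear further down in this patch; the service name, service version, and resource type below are placeholders, and real integrations normally let the Provider perform this handshake on their behalf:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"os"

	sc "github.com/hashicorp/scada-client"
)

func main() {
	// Dial the SCADA broker over TLS; DefaultEndpoint is scada.hashicorp.com:7223.
	client, err := sc.DialTLS(sc.DefaultEndpoint, &tls.Config{})
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer client.Close()

	// Authenticate the session, mirroring what Provider.handshake does internally.
	req := sc.HandshakeRequest{
		Service:        "example-service", // placeholder
		ServiceVersion: "0.1.0",           // placeholder
		Capabilities:   map[string]int{"http": 1},
		ResourceType:   "example-type",    // placeholder
		ResourceGroup:  "hashicorp/prod",  // example group from the provider docs
		Token:          os.Getenv("ATLAS_TOKEN"),
	}
	var resp sc.HandshakeResponse
	if err := client.RPC("Session.Handshake", &req, &resp); err != nil {
		log.Fatalf("handshake failed: %v", err)
	}
	fmt.Printf("authenticated=%v session=%s\n", resp.Authenticated, resp.SessionID)

	// After a successful handshake the connection stays open, and the broker
	// issues commands back over multiplexed streams (see Client.Accept below).
}
```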
- -## Environmental Variables - -This library respects the following environment variables: - -* ATLAS_TOKEN: The Atlas token to use for authentication -* SCADA_ENDPOINT: Overrides the default SCADA endpoint - diff --git a/vendor/github.com/hashicorp/scada-client/client.go b/vendor/github.com/hashicorp/scada-client/client.go deleted file mode 100644 index f8b3cf0670..0000000000 --- a/vendor/github.com/hashicorp/scada-client/client.go +++ /dev/null @@ -1,146 +0,0 @@ -package client - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "sync" - "time" - - "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/hashicorp/yamux" -) - -const ( - // clientPreamble is the preamble to send before upgrading - // the connection into a SCADA version 1 connection. - clientPreamble = "SCADA 1\n" - - // rpcTimeout is how long of a read deadline we provide - rpcTimeout = 10 * time.Second -) - -// Opts is used to parameterize a Dial -type Opts struct { - // Addr is the dial address - Addr string - - // TLS controls if TLS is used - TLS bool - - // TLSConfig or nil for default - TLSConfig *tls.Config - - // Modifies the log output - LogOutput io.Writer -} - -// Client is a SCADA compatible client. This is a bare bones client that -// only handles the framing and RPC protocol. Higher-level clients should -// be prefered. -type Client struct { - conn net.Conn - client *yamux.Session - - closed bool - closedLock sync.Mutex -} - -// Dial is used to establish a new connection over TCP -func Dial(addr string) (*Client, error) { - opts := Opts{Addr: addr, TLS: false} - return DialOpts(&opts) -} - -// DialTLS is used to establish a new connection using TLS/TCP -func DialTLS(addr string, tlsConf *tls.Config) (*Client, error) { - opts := Opts{Addr: addr, TLS: true, TLSConfig: tlsConf} - return DialOpts(&opts) -} - -// DialOpts is a parameterized Dial -func DialOpts(opts *Opts) (*Client, error) { - var conn net.Conn - var err error - if opts.TLS { - conn, err = tls.Dial("tcp", opts.Addr, opts.TLSConfig) - } else { - conn, err = net.DialTimeout("tcp", opts.Addr, 10*time.Second) - } - if err != nil { - return nil, err - } - return initClient(conn, opts) -} - -// initClient does the common initialization -func initClient(conn net.Conn, opts *Opts) (*Client, error) { - // Send the preamble - _, err := conn.Write([]byte(clientPreamble)) - if err != nil { - return nil, fmt.Errorf("preamble write failed: %v", err) - } - - // Wrap the connection in yamux for multiplexing - ymConf := yamux.DefaultConfig() - if opts.LogOutput != nil { - ymConf.LogOutput = opts.LogOutput - } - client, _ := yamux.Client(conn, ymConf) - - // Create the client - c := &Client{ - conn: conn, - client: client, - } - return c, nil -} - -// Close is used to terminate the client connection -func (c *Client) Close() error { - c.closedLock.Lock() - defer c.closedLock.Unlock() - - if c.closed { - return nil - } - c.closed = true - c.client.GoAway() // Notify the other side of the close - return c.client.Close() -} - -// RPC is used to perform an RPC -func (c *Client) RPC(method string, args interface{}, resp interface{}) error { - // Get a stream - stream, err := c.Open() - if err != nil { - return fmt.Errorf("failed to open stream: %v", err) - } - defer stream.Close() - stream.SetDeadline(time.Now().Add(rpcTimeout)) - - // Create the RPC client - cc := msgpackrpc.NewCodec(true, true, stream) - return msgpackrpc.CallWithCodec(cc, method, args, resp) -} - -// Accept is used to accept an incoming connection -func (c *Client) Accept() (net.Conn, error) { - 
return c.client.Accept() -} - -// Open is used to open an outgoing connection -func (c *Client) Open() (net.Conn, error) { - return c.client.Open() -} - -// Addr is so that client can act like a net.Listener -func (c *Client) Addr() net.Addr { - return c.client.LocalAddr() -} - -// NumStreams returns the number of open streams on the client -func (c *Client) NumStreams() int { - return c.client.NumStreams() -} diff --git a/vendor/github.com/hashicorp/scada-client/provider.go b/vendor/github.com/hashicorp/scada-client/provider.go deleted file mode 100644 index 0563ebca91..0000000000 --- a/vendor/github.com/hashicorp/scada-client/provider.go +++ /dev/null @@ -1,473 +0,0 @@ -package client - -import ( - "crypto/tls" - "fmt" - "io" - "log" - "math/rand" - "net" - "net/rpc" - "os" - "strings" - "sync" - "time" - - "github.com/armon/go-metrics" - "github.com/hashicorp/net-rpc-msgpackrpc" -) - -const ( - // DefaultEndpoint is the endpoint used if none is provided - DefaultEndpoint = "scada.hashicorp.com:7223" - - // DefaultBackoff is the amount of time we back off if we encounter - // and error, and no specific backoff is available. - DefaultBackoff = 120 * time.Second - - // DisconnectDelay is how long we delay the disconnect to allow - // the RPC to complete. - DisconnectDelay = time.Second -) - -// CapabilityProvider is used to provide a given capability -// when requested remotely. They must return a connection -// that is bridged or an error. -type CapabilityProvider func(capability string, meta map[string]string, conn io.ReadWriteCloser) error - -// ProviderService is the service being exposed -type ProviderService struct { - Service string - ServiceVersion string - Capabilities map[string]int - Meta map[string]string - ResourceType string -} - -// ProviderConfig is used to parameterize a provider -type ProviderConfig struct { - // Endpoint is the SCADA endpoint, defaults to DefaultEndpoint - Endpoint string - - // Service is the service to expose - Service *ProviderService - - // Handlers are invoked to provide the named capability - Handlers map[string]CapabilityProvider - - // ResourceGroup is the named group e.g. "hashicorp/prod" - ResourceGroup string - - // Token is the Atlas authentication token - Token string - - // Optional TLS configuration, defaults used otherwise - TLSConfig *tls.Config - - // LogOutput is to control the log output - LogOutput io.Writer -} - -// Provider is a high-level interface to SCADA by which -// clients declare themselves as a service providing capabilities. -// Provider manages the client/server interactions required, -// making it simpler to integrate. 
-type Provider struct { - config *ProviderConfig - logger *log.Logger - - client *Client - clientLock sync.Mutex - - noRetry bool // set when the server instructs us to not retry - backoff time.Duration // set when the server provides a longer backoff - backoffLock sync.Mutex - - sessionID string - sessionAuth bool - sessionLock sync.RWMutex - - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex -} - -// validateConfig is used to sanity check the configuration -func validateConfig(config *ProviderConfig) error { - // Validate the inputs - if config == nil { - return fmt.Errorf("missing config") - } - if config.Service == nil { - return fmt.Errorf("missing service") - } - if config.Service.Service == "" { - return fmt.Errorf("missing service name") - } - if config.Service.ServiceVersion == "" { - return fmt.Errorf("missing service version") - } - if config.Service.ResourceType == "" { - return fmt.Errorf("missing service resource type") - } - if config.Handlers == nil && len(config.Service.Capabilities) != 0 { - return fmt.Errorf("missing handlers") - } - for c := range config.Service.Capabilities { - if _, ok := config.Handlers[c]; !ok { - return fmt.Errorf("missing handler for '%s' capability", c) - } - } - if config.ResourceGroup == "" { - return fmt.Errorf("missing resource group") - } - if config.Token == "" { - config.Token = os.Getenv("ATLAS_TOKEN") - } - if config.Token == "" { - return fmt.Errorf("missing token") - } - - // Default the endpoint - if config.Endpoint == "" { - config.Endpoint = DefaultEndpoint - if end := os.Getenv("SCADA_ENDPOINT"); end != "" { - config.Endpoint = end - } - } - return nil -} - -// NewProvider is used to create a new provider -func NewProvider(config *ProviderConfig) (*Provider, error) { - if err := validateConfig(config); err != nil { - return nil, err - } - - // Create logger - if config.LogOutput == nil { - config.LogOutput = os.Stderr - } - logger := log.New(config.LogOutput, "", log.LstdFlags) - - p := &Provider{ - config: config, - logger: logger, - shutdownCh: make(chan struct{}), - } - go p.run() - return p, nil -} - -// Shutdown is used to close the provider -func (p *Provider) Shutdown() { - p.shutdownLock.Lock() - p.shutdownLock.Unlock() - if p.shutdown { - return - } - p.shutdown = true - close(p.shutdownCh) -} - -// IsShutdown checks if we have been shutdown -func (p *Provider) IsShutdown() bool { - select { - case <-p.shutdownCh: - return true - default: - return false - } -} - -// backoffDuration is used to compute the next backoff duration -func (p *Provider) backoffDuration() time.Duration { - // Use the default backoff - backoff := DefaultBackoff - - // Check for a server specified backoff - p.backoffLock.Lock() - if p.backoff != 0 { - backoff = p.backoff - } - if p.noRetry { - backoff = 0 - } - p.backoffLock.Unlock() - - return backoff -} - -// wait is used to delay dialing on an error -func (p *Provider) wait() { - // Compute the backoff time - backoff := p.backoffDuration() - - // Setup a wait timer - var wait <-chan time.Time - if backoff > 0 { - jitter := time.Duration(rand.Uint32()) % backoff - wait = time.After(backoff + jitter) - } - - // Wait until timer or shutdown - select { - case <-wait: - case <-p.shutdownCh: - } -} - -// run is a long running routine to manage the provider -func (p *Provider) run() { - for !p.IsShutdown() { - // Setup a new connection - client, err := p.clientSetup() - if err != nil { - p.wait() - continue - } - - // Handle the session - doneCh := make(chan struct{}) - go 
p.handleSession(client, doneCh) - - // Wait for session termination or shutdown - select { - case <-doneCh: - p.wait() - case <-p.shutdownCh: - p.clientLock.Lock() - client.Close() - p.clientLock.Unlock() - return - } - } -} - -// handleSession is used to handle an established session -func (p *Provider) handleSession(list net.Listener, doneCh chan struct{}) { - defer close(doneCh) - defer list.Close() - // Accept new connections - for !p.IsShutdown() { - conn, err := list.Accept() - if err != nil { - p.logger.Printf("[ERR] scada-client: failed to accept connection: %v", err) - return - } - p.logger.Printf("[DEBUG] scada-client: accepted connection") - go p.handleConnection(conn) - } -} - -// handleConnection handles an incoming connection -func (p *Provider) handleConnection(conn net.Conn) { - // Create an RPC server to handle inbound - pe := &providerEndpoint{p: p} - rpcServer := rpc.NewServer() - rpcServer.RegisterName("Client", pe) - rpcCodec := msgpackrpc.NewCodec(false, false, conn) - - defer func() { - if !pe.hijacked() { - conn.Close() - } - }() - - for !p.IsShutdown() { - if err := rpcServer.ServeRequest(rpcCodec); err != nil { - if err != io.EOF && !strings.Contains(err.Error(), "closed") { - p.logger.Printf("[ERR] scada-client: RPC error: %v", err) - } - return - } - - // Handle potential hijack in Client.Connect - if pe.hijacked() { - cb := pe.getHijack() - cb(conn) - return - } - } -} - -// clientSetup is used to setup a new connection -func (p *Provider) clientSetup() (*Client, error) { - defer metrics.MeasureSince([]string{"scada", "setup"}, time.Now()) - - // Reset the previous backoff - p.backoffLock.Lock() - p.noRetry = false - p.backoff = 0 - p.backoffLock.Unlock() - - // Dial a new connection - opts := Opts{ - Addr: p.config.Endpoint, - TLS: true, - TLSConfig: p.config.TLSConfig, - LogOutput: p.config.LogOutput, - } - client, err := DialOpts(&opts) - if err != nil { - p.logger.Printf("[ERR] scada-client: failed to dial: %v", err) - return nil, err - } - - // Perform a handshake - resp, err := p.handshake(client) - if err != nil { - p.logger.Printf("[ERR] scada-client: failed to handshake: %v", err) - client.Close() - return nil, err - } - if resp != nil && resp.SessionID != "" { - p.logger.Printf("[DEBUG] scada-client: assigned session '%s'", resp.SessionID) - } - if resp != nil && !resp.Authenticated { - p.logger.Printf("[WARN] scada-client: authentication failed: %v", resp.Reason) - } - - // Set the new client - p.clientLock.Lock() - if p.client != nil { - p.client.Close() - } - p.client = client - p.clientLock.Unlock() - - p.sessionLock.Lock() - p.sessionID = resp.SessionID - p.sessionAuth = resp.Authenticated - p.sessionLock.Unlock() - - return client, nil -} - -// SessionID provides the current session ID -func (p *Provider) SessionID() string { - p.sessionLock.RLock() - defer p.sessionLock.RUnlock() - return p.sessionID -} - -// SessionAuth checks if the current session is authenticated -func (p *Provider) SessionAuthenticated() bool { - p.sessionLock.RLock() - defer p.sessionLock.RUnlock() - return p.sessionAuth -} - -// handshake does the initial handshake -func (p *Provider) handshake(client *Client) (*HandshakeResponse, error) { - defer metrics.MeasureSince([]string{"scada", "handshake"}, time.Now()) - req := HandshakeRequest{ - Service: p.config.Service.Service, - ServiceVersion: p.config.Service.ServiceVersion, - Capabilities: p.config.Service.Capabilities, - Meta: p.config.Service.Meta, - ResourceType: p.config.Service.ResourceType, - ResourceGroup: 
p.config.ResourceGroup, - Token: p.config.Token, - } - resp := new(HandshakeResponse) - if err := client.RPC("Session.Handshake", &req, resp); err != nil { - return nil, err - } - return resp, nil -} - -type HijackFunc func(io.ReadWriteCloser) - -// providerEndpoint is used to implement the Client.* RPC endpoints -// as part of the provider. -type providerEndpoint struct { - p *Provider - hijack HijackFunc -} - -// Hijacked is used to check if the connection has been hijacked -func (pe *providerEndpoint) hijacked() bool { - return pe.hijack != nil -} - -// GetHijack returns the hijack function -func (pe *providerEndpoint) getHijack() HijackFunc { - return pe.hijack -} - -// Hijack is used to take over the yamux stream for Client.Connect -func (pe *providerEndpoint) setHijack(cb HijackFunc) { - pe.hijack = cb -} - -// Connect is invoked by the broker to connect to a capability -func (pe *providerEndpoint) Connect(args *ConnectRequest, resp *ConnectResponse) error { - defer metrics.IncrCounter([]string{"scada", "connect", args.Capability}, 1) - pe.p.logger.Printf("[INFO] scada-client: connect requested (capability: %s)", - args.Capability) - - // Handle potential flash - if args.Severity != "" && args.Message != "" { - pe.p.logger.Printf("[%s] scada-client: %s", args.Severity, args.Message) - } - - // Look for the handler - handler := pe.p.config.Handlers[args.Capability] - if handler == nil { - pe.p.logger.Printf("[WARN] scada-client: requested capability '%s' not available", - args.Capability) - return fmt.Errorf("invalid capability") - } - - // Hijack the connection - pe.setHijack(func(a io.ReadWriteCloser) { - if err := handler(args.Capability, args.Meta, a); err != nil { - pe.p.logger.Printf("[ERR] scada-client: '%s' handler error: %v", - args.Capability, err) - } - }) - resp.Success = true - return nil -} - -// Disconnect is invoked by the broker to ask us to backoff -func (pe *providerEndpoint) Disconnect(args *DisconnectRequest, resp *DisconnectResponse) error { - defer metrics.IncrCounter([]string{"scada", "disconnect"}, 1) - if args.Reason == "" { - args.Reason = "" - } - pe.p.logger.Printf("[INFO] scada-client: disconnect requested (retry: %v, backoff: %v): %v", - !args.NoRetry, args.Backoff, args.Reason) - - // Use the backoff information - pe.p.backoffLock.Lock() - pe.p.noRetry = args.NoRetry - pe.p.backoff = args.Backoff - pe.p.backoffLock.Unlock() - - // Clear the session information - pe.p.sessionLock.Lock() - pe.p.sessionID = "" - pe.p.sessionAuth = false - pe.p.sessionLock.Unlock() - - // Force the disconnect - time.AfterFunc(DisconnectDelay, func() { - pe.p.clientLock.Lock() - if pe.p.client != nil { - pe.p.client.Close() - } - pe.p.clientLock.Unlock() - }) - return nil -} - -// Flash is invoked by the broker log a message -func (pe *providerEndpoint) Flash(args *FlashRequest, resp *FlashResponse) error { - defer metrics.IncrCounter([]string{"scada", "flash"}, 1) - if args.Severity != "" && args.Message != "" { - pe.p.logger.Printf("[%s] scada-client: %s", args.Severity, args.Message) - } - return nil -} diff --git a/vendor/github.com/hashicorp/scada-client/scada/scada.go b/vendor/github.com/hashicorp/scada-client/scada/scada.go deleted file mode 100644 index 2c10997c98..0000000000 --- a/vendor/github.com/hashicorp/scada-client/scada/scada.go +++ /dev/null @@ -1,231 +0,0 @@ -package scada - -import ( - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "os" - "sync" - "time" - - sc "github.com/hashicorp/scada-client" -) - -// Provider wraps scada-client.Provider to allow 
most applications to only pull -// in this package -type Provider struct { - *sc.Provider -} - -type AtlasConfig struct { - // Endpoint is the SCADA endpoint used for Atlas integration. If empty, the - // defaults from the provider are used. - Endpoint string `mapstructure:"endpoint"` - - // The name of the infrastructure we belong to, e.g. "hashicorp/prod" - Infrastructure string `mapstructure:"infrastructure"` - - // The Atlas authentication token - Token string `mapstructure:"token" json:"-"` -} - -// Config holds the high-level information used to instantiate a SCADA provider -// and listener -type Config struct { - // The service name to use - Service string - - // The version of the service - Version string - - // The type of resource we represent - ResourceType string - - // Metadata to send to along with the service information - Meta map[string]string - - // If set, TLS certificate verification will be skipped. The value of the - // SCADA_INSECURE environment variable will be considered if this is false. - // If using SCADA_INSECURE, any non-empty value will trigger insecure mode. - Insecure bool - - // Holds Atlas configuration - Atlas AtlasConfig -} - -// ProviderService returns the service information for the provider -func providerService(c *Config) *sc.ProviderService { - ret := &sc.ProviderService{ - Service: c.Service, - ServiceVersion: c.Version, - Capabilities: map[string]int{}, - Meta: c.Meta, - ResourceType: c.ResourceType, - } - - return ret -} - -// providerConfig returns the configuration for the SCADA provider -func providerConfig(c *Config) *sc.ProviderConfig { - ret := &sc.ProviderConfig{ - Service: providerService(c), - Handlers: map[string]sc.CapabilityProvider{}, - Endpoint: c.Atlas.Endpoint, - ResourceGroup: c.Atlas.Infrastructure, - Token: c.Atlas.Token, - } - - // SCADA_INSECURE env variable is used for testing to disable TLS - // certificate verification. - insecure := c.Insecure - if !insecure { - if os.Getenv("SCADA_INSECURE") != "" { - insecure = true - } - } - if insecure { - ret.TLSConfig = &tls.Config{ - InsecureSkipVerify: true, - } - } - - return ret -} - -// NewProvider creates a new SCADA provider using the given configuration. -// Requests for the HTTP capability are passed off to the listener that is -// returned. 
-func NewHTTPProvider(c *Config, logOutput io.Writer) (*Provider, net.Listener, error) { - // Get the configuration of the provider - config := providerConfig(c) - config.LogOutput = logOutput - - // Set the HTTP capability - config.Service.Capabilities["http"] = 1 - - // Create an HTTP listener and handler - list := newScadaListener(c.Atlas.Infrastructure) - config.Handlers["http"] = func(capability string, meta map[string]string, - conn io.ReadWriteCloser) error { - return list.PushRWC(conn) - } - - // Create the provider - provider, err := sc.NewProvider(config) - if err != nil { - list.Close() - return nil, nil, err - } - - return &Provider{provider}, list, nil -} - -// scadaListener is used to return a net.Listener for -// incoming SCADA connections -type scadaListener struct { - addr *scadaAddr - pending chan net.Conn - - closed bool - closedCh chan struct{} - l sync.Mutex -} - -// newScadaListener returns a new listener -func newScadaListener(infra string) *scadaListener { - l := &scadaListener{ - addr: &scadaAddr{infra}, - pending: make(chan net.Conn), - closedCh: make(chan struct{}), - } - return l -} - -// PushRWC is used to push a io.ReadWriteCloser as a net.Conn -func (s *scadaListener) PushRWC(conn io.ReadWriteCloser) error { - // Check if this already implements net.Conn - if nc, ok := conn.(net.Conn); ok { - return s.Push(nc) - } - - // Wrap to implement the interface - wrapped := &scadaRWC{conn, s.addr} - return s.Push(wrapped) -} - -// Push is used to add a connection to the queu -func (s *scadaListener) Push(conn net.Conn) error { - select { - case s.pending <- conn: - return nil - case <-time.After(time.Second): - return fmt.Errorf("accept timed out") - case <-s.closedCh: - return fmt.Errorf("scada listener closed") - } -} - -func (s *scadaListener) Accept() (net.Conn, error) { - select { - case conn := <-s.pending: - return conn, nil - case <-s.closedCh: - return nil, fmt.Errorf("scada listener closed") - } -} - -func (s *scadaListener) Close() error { - s.l.Lock() - defer s.l.Unlock() - if s.closed { - return nil - } - s.closed = true - close(s.closedCh) - return nil -} - -func (s *scadaListener) Addr() net.Addr { - return s.addr -} - -// scadaAddr is used to return a net.Addr for SCADA -type scadaAddr struct { - infra string -} - -func (s *scadaAddr) Network() string { - return "SCADA" -} - -func (s *scadaAddr) String() string { - return fmt.Sprintf("SCADA::Atlas::%s", s.infra) -} - -type scadaRWC struct { - io.ReadWriteCloser - addr *scadaAddr -} - -func (s *scadaRWC) LocalAddr() net.Addr { - return s.addr -} - -func (s *scadaRWC) RemoteAddr() net.Addr { - return s.addr -} - -func (s *scadaRWC) SetDeadline(t time.Time) error { - return errors.New("SCADA.Conn does not support deadlines") -} - -func (s *scadaRWC) SetReadDeadline(t time.Time) error { - return errors.New("SCADA.Conn does not support deadlines") -} - -func (s *scadaRWC) SetWriteDeadline(t time.Time) error { - return errors.New("SCADA.Conn does not support deadlines") -} diff --git a/vendor/github.com/hashicorp/scada-client/structs.go b/vendor/github.com/hashicorp/scada-client/structs.go deleted file mode 100644 index f09777c650..0000000000 --- a/vendor/github.com/hashicorp/scada-client/structs.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import "time" - -// HandshakeRequest is used to authenticate the session -type HandshakeRequest struct { - Service string - ServiceVersion string - Capabilities map[string]int - Meta map[string]string - ResourceType string - ResourceGroup string - Token string -} 
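Before moving on through the remaining struct definitions: the higher-level entry point most products used was the scada package's NewHTTPProvider, deleted earlier in this patch, which registers the "http" capability and hands back a net.Listener whose Accept yields bridged connections. A usage sketch under that removed API, with placeholder service and infrastructure values:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/hashicorp/scada-client/scada"
)

func main() {
	cfg := &scada.Config{
		Service:      "example-service", // placeholder
		Version:      "0.1.0",           // placeholder
		ResourceType: "example-type",    // placeholder
		Atlas: scada.AtlasConfig{
			Infrastructure: "hashicorp/prod", // placeholder group
			Token:          os.Getenv("ATLAS_TOKEN"),
		},
	}

	// NewHTTPProvider wires the "http" capability to a net.Listener.
	provider, list, err := scada.NewHTTPProvider(cfg, os.Stderr)
	if err != nil {
		log.Fatalf("scada setup failed: %v", err)
	}
	defer provider.Shutdown()
	defer list.Close()

	// Ordinary HTTP handlers are then served over the SCADA-backed listener.
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello via SCADA")
	})
	log.Fatal(http.Serve(list, mux))
}
```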
- -type HandshakeResponse struct { - Authenticated bool - SessionID string - Reason string -} - -type ConnectRequest struct { - Capability string - Meta map[string]string - - Severity string - Message string -} - -type ConnectResponse struct { - Success bool -} - -type DisconnectRequest struct { - NoRetry bool // Should the client retry - Backoff time.Duration // Minimum backoff - Reason string -} - -type DisconnectResponse struct { -} - -type FlashRequest struct { - Severity string - Message string -} - -type FlashResponse struct { -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go index 46224ad66d..449e5af171 100644 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go @@ -384,18 +384,20 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r err = errors.StructuralError("empty key flags subpacket") return } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true + if subpacket[0] != 0 { + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } } case reasonForRevocationSubpacket: // Reason For Revocation, section 5.2.3.23 diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go index 3b322bc062..3747ffcaf8 100644 --- a/vendor/github.com/lib/pq/conn.go +++ b/vendor/github.com/lib/pq/conn.go @@ -1339,7 +1339,12 @@ func (rs *rows) Close() error { switch err { case nil: case io.EOF: - return nil + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
+ if rs.done { + return nil + } default: return err } diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 6194eb92d2..575a1343cd 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -411,11 +411,6 @@ func (w *Writer) Write(data []byte) (n int, err error) { var bw [1]byte loop: for { - r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - if r1 == 0 { - break loop - } - c1, err := er.ReadByte() if err != nil { break loop @@ -460,6 +455,7 @@ loop: if err != nil { continue } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y -= short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'B': @@ -467,6 +463,7 @@ loop: if err != nil { continue } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'C': @@ -474,23 +471,23 @@ loop: if err != nil { continue } - csbi.cursorPosition.x -= short(n) + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'D': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - if n, err = strconv.Atoi(buf.String()); err == nil { - var csbi consoleScreenBufferInfo - csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'E': n, err = strconv.Atoi(buf.String()) if err != nil { continue } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) @@ -499,6 +496,7 @@ loop: if err != nil { continue } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y -= short(n) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) @@ -507,9 +505,11 @@ loop: if err != nil { continue } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = short(n - 1) procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'H': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) if buf.Len() > 0 { token := strings.Split(buf.String(), ";") switch len(token) { @@ -545,6 +545,7 @@ loop: } var count, written dword var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} @@ -566,6 +567,7 @@ loop: continue } } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) 
var cursor coord switch n { case 0: @@ -580,6 +582,7 @@ loop: procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) attr := csbi.attributes cs := buf.String() if cs == "" { @@ -700,6 +703,7 @@ loop: procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) } case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) w.oldpos = csbi.cursorPosition case 'u': procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) diff --git a/vendor/github.com/michaelklishin/rabbit-hole/README.md b/vendor/github.com/michaelklishin/rabbit-hole/README.md index 22a5845f7b..473c072d72 100644 --- a/vendor/github.com/michaelklishin/rabbit-hole/README.md +++ b/vendor/github.com/michaelklishin/rabbit-hole/README.md @@ -140,6 +140,10 @@ x, err := rmqc.GetUser("my.user") resp, err := rmqc.PutUser("my.user", UserSettings{Password: "s3krE7", Tags: "management,policymaker"}) // => *http.Response, err +// creates or updates individual user with no password +resp, err := rmqc.PutUserWithoutPassword("my.user", UserSettings{Tags: "management,policymaker"}) +// => *http.Response, err + // deletes individual user resp, err := rmqc.DeleteUser("my.user") // => *http.Response, err diff --git a/vendor/github.com/michaelklishin/rabbit-hole/client.go b/vendor/github.com/michaelklishin/rabbit-hole/client.go index 96c112ec56..f4bf37096c 100644 --- a/vendor/github.com/michaelklishin/rabbit-hole/client.go +++ b/vendor/github.com/michaelklishin/rabbit-hole/client.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "net/url" + "time" ) type Client struct { @@ -17,6 +18,7 @@ type Client struct { Password string host string transport *http.Transport + timeout time.Duration } func NewClient(uri string, username string, password string) (me *Client, err error) { @@ -58,6 +60,12 @@ func (c *Client) SetTransport(transport *http.Transport) { c.transport = transport } +// SetTimeout changes the HTTP timeout that the Client will use. +// By default there is no timeout. 
+func (c *Client) SetTimeout(timeout time.Duration) { + c.timeout = timeout +} + func newGETRequest(client *Client, path string) (*http.Request, error) { s := client.Endpoint + "/api/" + path req, err := http.NewRequest("GET", s, nil) @@ -97,29 +105,17 @@ func newRequestWithBody(client *Client, method string, path string, body []byte) } func executeRequest(client *Client, req *http.Request) (res *http.Response, err error) { - var httpc *http.Client + httpc := &http.Client{ + Timeout: client.timeout, + } if client.transport != nil { - httpc = &http.Client{Transport: client.transport} - } else { - httpc = &http.Client{} + httpc.Transport = client.transport } - res, err = httpc.Do(req) - - if err != nil { - return nil, err - } - - return res, nil + return httpc.Do(req) } func executeAndParseRequest(client *Client, req *http.Request, rec interface{}) (err error) { - var httpc *http.Client - if client.transport != nil { - httpc = &http.Client{Transport: client.transport} - } else { - httpc = &http.Client{} - } - res, err := httpc.Do(req) + res, err := executeRequest(client, req) if err != nil { return err } diff --git a/vendor/github.com/michaelklishin/rabbit-hole/users.go b/vendor/github.com/michaelklishin/rabbit-hole/users.go index 8c319154cc..ec3a7e362b 100644 --- a/vendor/github.com/michaelklishin/rabbit-hole/users.go +++ b/vendor/github.com/michaelklishin/rabbit-hole/users.go @@ -88,6 +88,25 @@ func (c *Client) PutUser(username string, info UserSettings) (res *http.Response return res, nil } +func (c *Client) PutUserWithoutPassword(username string, info UserSettings) (res *http.Response, err error) { + body, err := json.Marshal(UserInfo{Tags: info.Tags}) + if err != nil { + return nil, err + } + + req, err := newRequestWithBody(c, "PUT", "users/"+PathEscape(username), body) + if err != nil { + return nil, err + } + + res, err = executeRequest(c, req) + if err != nil { + return nil, err + } + + return res, nil +} + // // DELETE /api/users/{name} // diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md index dd211cf0e7..8f02cdd0a3 100644 --- a/vendor/github.com/mitchellh/cli/README.md +++ b/vendor/github.com/mitchellh/cli/README.md @@ -18,6 +18,9 @@ cli is the library that powers the CLI for * Optional support for default subcommands so `cli` does something other than error. +* Support for shell autocompletion of subcommands, flags, and arguments + with callbacks in Go. You don't need to write any shell code. + * Automatic help generation for listing subcommands * Automatic help flag recognition of `-h`, `--help`, etc. diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go new file mode 100644 index 0000000000..3bec6258f0 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/autocomplete.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/posener/complete/cmd/install" +) + +// autocompleteInstaller is an interface to be implemented to perform the +// autocomplete installation and uninstallation with a CLI. +// +// This interface is not exported because it only exists for unit tests +// to be able to test that the installation is called properly. +type autocompleteInstaller interface { + Install(string) error + Uninstall(string) error +} + +// realAutocompleteInstaller uses the real install package to do the +// install/uninstall. 
+type realAutocompleteInstaller struct{} + +func (i *realAutocompleteInstaller) Install(cmd string) error { + return install.Install(cmd) +} + +func (i *realAutocompleteInstaller) Uninstall(cmd string) error { + return install.Uninstall(cmd) +} + +// mockAutocompleteInstaller is used for tests to record the install/uninstall. +type mockAutocompleteInstaller struct { + InstallCalled bool + UninstallCalled bool +} + +func (i *mockAutocompleteInstaller) Install(cmd string) error { + i.InstallCalled = true + return nil +} + +func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { + i.UninstallCalled = true + return nil +} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go index 4a69d176db..e0774fd389 100644 --- a/vendor/github.com/mitchellh/cli/cli.go +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -11,6 +11,7 @@ import ( "text/template" "github.com/armon/go-radix" + "github.com/posener/complete" ) // CLI contains the state necessary to run subcommands and parse the @@ -66,6 +67,32 @@ type CLI struct { // Version of the CLI. Version string + // Autocomplete enables or disables subcommand auto-completion support. + // This is enabled by default when NewCLI is called. Otherwise, this + // must enabled explicitly. + // + // Autocomplete requires the "Name" option to be set on CLI. This name + // should be set exactly to the binary name that is autocompleted. + // + // Autocompletion is supported via the github.com/posener/complete + // library. This library supports both bash and zsh. To add support + // for other shells, please see that library. + // + // AutocompleteInstall and AutocompleteUninstall are the global flag + // names for installing and uninstalling the autocompletion handlers + // for the user's shell. The flag should omit the hyphen(s) in front of + // the value. Both single and double hyphens will automatically be supported + // for the flag name. These default to `autocomplete-install` and + // `autocomplete-uninstall` respectively. + // + // AutocompleteGlobalFlags are a mapping of global flags for + // autocompletion. The help and version flags are automatically added. + Autocomplete bool + AutocompleteInstall string + AutocompleteUninstall string + AutocompleteGlobalFlags complete.Flags + autocompleteInstaller autocompleteInstaller // For tests + // HelpFunc and HelpWriter are used to output help information, if // requested. // @@ -78,23 +105,32 @@ type CLI struct { HelpFunc HelpFunc HelpWriter io.Writer + //--------------------------------------------------------------- + // Internal fields set automatically + once sync.Once + autocomplete *complete.Complete commandTree *radix.Tree commandNested bool - isHelp bool subcommand string subcommandArgs []string topFlags []string - isVersion bool + // These are true when special global flags are set. We can/should + // probably use a bitset for this one day. + isHelp bool + isVersion bool + isAutocompleteInstall bool + isAutocompleteUninstall bool } // NewClI returns a new CLI instance with sensible defaults. 
func NewCLI(app, version string) *CLI { return &CLI{ - Name: app, - Version: version, - HelpFunc: BasicHelpFunc(app), + Name: app, + Version: version, + HelpFunc: BasicHelpFunc(app), + Autocomplete: true, } } @@ -129,6 +165,45 @@ func (c *CLI) Run() (int, error) { return 0, nil } + // If we're attempting to install or uninstall autocomplete then handle + if c.Autocomplete { + // Autocomplete requires the "Name" to be set so that we know what + // command to setup the autocomplete on. + if c.Name == "" { + return 1, fmt.Errorf( + "internal error: CLI.Name must be specified for autocomplete to work") + } + + // If both install and uninstall flags are specified, then error + if c.isAutocompleteInstall && c.isAutocompleteUninstall { + return 1, fmt.Errorf( + "Either the autocomplete install or uninstall flag may " + + "be specified, but not both.") + } + + // If the install flag is specified, perform the install or uninstall + if c.isAutocompleteInstall { + if err := c.autocompleteInstaller.Install(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + + if c.isAutocompleteUninstall { + if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + + // If this is a autocompletion request, satisfy it + if c.autocomplete.Complete() { + return 0, nil + } + } + // Attempt to get the factory function for creating the command // implementation. If the command is invalid or blank, it is an error. raw, ok := c.commandTree.Get(c.Subcommand()) @@ -268,10 +343,111 @@ func (c *CLI) init() { } } + // Setup autocomplete if we have it enabled. We have to do this after + // the command tree is setup so we can use the radix tree to easily find + // all subcommands. + if c.Autocomplete { + c.initAutocomplete() + } + // Process the args c.processArgs() } +func (c *CLI) initAutocomplete() { + if c.AutocompleteInstall == "" { + c.AutocompleteInstall = defaultAutocompleteInstall + } + + if c.AutocompleteUninstall == "" { + c.AutocompleteUninstall = defaultAutocompleteUninstall + } + + if c.autocompleteInstaller == nil { + c.autocompleteInstaller = &realAutocompleteInstaller{} + } + + // Build the root command + cmd := c.initAutocompleteSub("") + + // For the root, we add the global flags to the "Flags". This way + // they don't show up on every command. + cmd.Flags = map[string]complete.Predictor{ + "-" + c.AutocompleteInstall: complete.PredictNothing, + "-" + c.AutocompleteUninstall: complete.PredictNothing, + "-help": complete.PredictNothing, + "-version": complete.PredictNothing, + } + cmd.GlobalFlags = c.AutocompleteGlobalFlags + + c.autocomplete = complete.New(c.Name, cmd) +} + +// initAutocompleteSub creates the complete.Command for a subcommand with +// the given prefix. This will continue recursively for all subcommands. +// The prefix "" (empty string) can be used for the root command. +func (c *CLI) initAutocompleteSub(prefix string) complete.Command { + var cmd complete.Command + walkFn := func(k string, raw interface{}) bool { + if len(prefix) > 0 { + // If we have a prefix, trim the prefix + 1 (for the space) + // Example: turns "sub one" to "one" with prefix "sub" + k = k[len(prefix)+1:] + } + + // Keep track of the full key so that we can nest further if necessary + fullKey := k + + if idx := strings.LastIndex(k, " "); idx >= 0 { + // If there is a space, we trim up to the space + k = k[:idx] + } + + if idx := strings.LastIndex(k, " "); idx >= 0 { + // This catches the scenario just in case where we see "sub one" + // before "sub". 
This will let us properly setup the subcommand + // regardless. + k = k[idx+1:] + } + + if _, ok := cmd.Sub[k]; ok { + // If we already tracked this subcommand then ignore + return false + } + + if cmd.Sub == nil { + cmd.Sub = complete.Commands(make(map[string]complete.Command)) + } + subCmd := c.initAutocompleteSub(fullKey) + + // Instantiate the command so that we can check if the command is + // a CommandAutocomplete implementation. If there is an error + // creating the command, we just ignore it since that will be caught + // later. + impl, err := raw.(CommandFactory)() + if err != nil { + impl = nil + } + + // Check if it implements ComandAutocomplete. If so, setup the autocomplete + if c, ok := impl.(CommandAutocomplete); ok { + subCmd.Args = c.AutocompleteArgs() + subCmd.Flags = c.AutocompleteFlags() + } + + cmd.Sub[k] = subCmd + return false + } + + walkPrefix := prefix + if walkPrefix != "" { + walkPrefix += " " + } + + c.commandTree.WalkPrefix(walkPrefix, walkFn) + return cmd +} + func (c *CLI) commandHelp(command Command) { // Get the template to use tpl := strings.TrimSpace(defaultHelpTemplate) @@ -404,6 +580,19 @@ func (c *CLI) processArgs() { continue } + // Check for autocomplete flags + if c.Autocomplete { + if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { + c.isAutocompleteInstall = true + continue + } + + if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { + c.isAutocompleteUninstall = true + continue + } + } + if c.subcommand == "" { // Check for version flags if not in a subcommand. if arg == "-v" || arg == "-version" || arg == "--version" { @@ -456,6 +645,11 @@ func (c *CLI) processArgs() { } } +// defaultAutocompleteInstall and defaultAutocompleteUninstall are the +// default values for the autocomplete install and uninstall flags. +const defaultAutocompleteInstall = "autocomplete-install" +const defaultAutocompleteUninstall = "autocomplete-uninstall" + const defaultHelpTemplate = ` {{.Help}}{{if gt (len .Subcommands) 0}} diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go index b4924eb000..bed11faf57 100644 --- a/vendor/github.com/mitchellh/cli/command.go +++ b/vendor/github.com/mitchellh/cli/command.go @@ -1,5 +1,9 @@ package cli +import ( + "github.com/posener/complete" +) + const ( // RunResultHelp is a value that can be returned from Run to signal // to the CLI to render the help output. @@ -26,6 +30,22 @@ type Command interface { Synopsis() string } +// CommandAutocomplete is an extension of Command that enables fine-grained +// autocompletion. Subcommand autocompletion will work even if this interface +// is not implemented. By implementing this interface, more advanced +// autocompletion is enabled. +type CommandAutocomplete interface { + // AutocompleteArgs returns the argument predictor for this command. + // If argument completion is not supported, this should return + // complete.PredictNothing. + AutocompleteArgs() complete.Predictor + + // AutocompleteFlags returns a mapping of supported flags and autocomplete + // options for this command. The map key for the Flags map should be the + // complete flag such as "-foo" or "--foo". + AutocompleteFlags() complete.Flags +} + // CommandHelpTemplate is an extension of Command that also has a function // for returning a template for the help rather than the help itself. In // this scenario, both Help and HelpTemplate should be implemented. 
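A command opts into the new fine-grained completion by implementing CommandAutocomplete alongside the regular Command methods. A minimal sketch, where the deployCommand type, its -force flag, and the "app" binary name are all hypothetical and exist only to illustrate the interface:

```go
package main

import (
	"log"
	"os"

	"github.com/mitchellh/cli"
	"github.com/posener/complete"
)

// deployCommand is a hypothetical command used only to illustrate the
// CommandAutocomplete interface added in this patch.
type deployCommand struct{}

func (c *deployCommand) Help() string          { return "Usage: app deploy [-force]" }
func (c *deployCommand) Synopsis() string      { return "Deploy something" }
func (c *deployCommand) Run(args []string) int { return 0 }

// AutocompleteArgs: no positional arguments are completed for this command.
func (c *deployCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}

// AutocompleteFlags advertises the command's flags to the shell.
func (c *deployCommand) AutocompleteFlags() complete.Flags {
	return complete.Flags{
		"-force": complete.PredictNothing,
	}
}

func main() {
	c := cli.NewCLI("app", "0.1.0") // Autocomplete is enabled by default in NewCLI
	c.Args = os.Args[1:]
	c.Commands = map[string]cli.CommandFactory{
		"deploy": func() (cli.Command, error) { return &deployCommand{}, nil },
	}
	status, err := c.Run()
	if err != nil {
		log.Println(err)
	}
	os.Exit(status)
}
```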
diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go index 6371e573bc..7a584b7e9b 100644 --- a/vendor/github.com/mitchellh/cli/command_mock.go +++ b/vendor/github.com/mitchellh/cli/command_mock.go @@ -1,5 +1,9 @@ package cli +import ( + "github.com/posener/complete" +) + // MockCommand is an implementation of Command that can be used for tests. // It is publicly exported from this package in case you want to use it // externally. @@ -29,6 +33,23 @@ func (c *MockCommand) Synopsis() string { return c.SynopsisText } +// MockCommandAutocomplete is an implementation of CommandAutocomplete. +type MockCommandAutocomplete struct { + MockCommand + + // Settable + AutocompleteArgsValue complete.Predictor + AutocompleteFlagsValue complete.Flags +} + +func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { + return c.AutocompleteArgsValue +} + +func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { + return c.AutocompleteFlagsValue +} + // MockCommandHelpTemplate is an implementation of CommandHelpTemplate. type MockCommandHelpTemplate struct { MockCommand diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index 8475ff7419..fe799bd698 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -37,7 +37,7 @@ type ImageConfig struct { // Cmd defines the default arguments to the entrypoint of the container. Cmd []string `json:"Cmd,omitempty"` - // Volumes is a set of directories which should be created as data volumes in a container running this image. + // Volumes is a set of directories describing where the process is likely write data specific to a container instance. Volumes map[string]struct{} `json:"Volumes,omitempty"` // WorkingDir sets the current working directory of the entrypoint process in the container. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index f4cda6ed8d..7d237e538e 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc6-dev" + VersionDev = "-rc7-dev" ) // Version is the specification version that the package types support. 
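Returning to the mitchellh/cli change set: the MockCommandAutocomplete type added above exists so tests can exercise the new interface without real commands. A hedged sketch of such a test, with a made-up test name, command synopsis, and flag:

```go
package cli_test

import (
	"testing"

	"github.com/mitchellh/cli"
	"github.com/posener/complete"
)

// TestMockAutocomplete is an illustrative example of standing in for a
// CommandAutocomplete implementation with the new mock.
func TestMockAutocomplete(t *testing.T) {
	cmd := &cli.MockCommandAutocomplete{
		MockCommand:           cli.MockCommand{SynopsisText: "a fake command"},
		AutocompleteArgsValue: complete.PredictNothing,
		AutocompleteFlagsValue: complete.Flags{
			"-flag": complete.PredictNothing,
		},
	}

	// Compile-time check that the mock satisfies the new interface, plus a
	// round-trip of the settable flag map.
	var ac cli.CommandAutocomplete = cmd
	if _, ok := ac.AutocompleteFlags()["-flag"]; !ok {
		t.Fatal("expected -flag to be advertised")
	}
}
```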
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 42e406652c..4837085a7f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -64,7 +64,7 @@ func Prlimit(pid, resource int, limit unix.Rlimit) error { } func SetParentDeathSignal(sig uintptr) error { - if _, _, err := unix.RawSyscall(unix.SYS_PRCTL, unix.PR_SET_PDEATHSIG, sig, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil { return err } return nil @@ -72,15 +72,14 @@ func SetParentDeathSignal(sig uintptr) error { func GetParentDeathSignal() (ParentDeathSignal, error) { var sig int - _, _, err := unix.RawSyscall(unix.SYS_PRCTL, unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) - if err != 0 { + if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil { return -1, err } return ParentDeathSignal(sig), nil } func SetKeepCaps() error { - if _, _, err := unix.RawSyscall(unix.SYS_PRCTL, unix.PR_SET_KEEPCAPS, 1, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil { return err } @@ -88,7 +87,7 @@ func SetKeepCaps() error { } func ClearKeepCaps() error { - if _, _, err := unix.RawSyscall(unix.SYS_PRCTL, unix.PR_SET_KEEPCAPS, 0, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil { return err } @@ -96,7 +95,7 @@ func ClearKeepCaps() error { } func Setctty() error { - if _, _, err := unix.RawSyscall(unix.SYS_IOCTL, 0, uintptr(unix.TIOCSCTTY), 0); err != 0 { + if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil { return err } return nil diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go index a0e9637199..79232a4371 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go @@ -1,43 +1,113 @@ package system import ( + "fmt" "io/ioutil" "path/filepath" "strconv" "strings" ) -// look in /proc to find the process start time so that we can verify -// that this pid has started after ourself +// State is the status of a process. +type State rune + +const ( // Only values for Linux 3.14 and later are listed here + Dead State = 'X' + DiskSleep State = 'D' + Running State = 'R' + Sleeping State = 'S' + Stopped State = 'T' + TracingStop State = 't' + Zombie State = 'Z' +) + +// String forms of the state from proc(5)'s documentation for +// /proc/[pid]/status' "State" field. +func (s State) String() string { + switch s { + case Dead: + return "dead" + case DiskSleep: + return "disk sleep" + case Running: + return "running" + case Sleeping: + return "sleeping" + case Stopped: + return "stopped" + case TracingStop: + return "tracing stop" + case Zombie: + return "zombie" + default: + return fmt.Sprintf("unknown (%c)", s) + } +} + +// Stat_t represents the information from /proc/[pid]/stat, as +// described in proc(5) with names based on the /proc/[pid]/status +// fields. +type Stat_t struct { + // PID is the process ID. + PID uint + + // Name is the command run by the process. + Name string + + // State is the state of the process. + State State + + // StartTime is the number of clock ticks after system boot (since + // Linux 2.6). + StartTime uint64 +} + +// Stat returns a Stat_t instance for the specified process. 
+func Stat(pid int) (stat Stat_t, err error) { + bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return stat, err + } + return parseStat(string(bytes)) +} + +// GetProcessStartTime is deprecated. Use Stat(pid) and +// Stat_t.StartTime instead. func GetProcessStartTime(pid int) (string, error) { - data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + stat, err := Stat(pid) if err != nil { return "", err } - return parseStartTime(string(data)) + return fmt.Sprintf("%d", stat.StartTime), nil } -func parseStartTime(stat string) (string, error) { - // the starttime is located at pos 22 - // from the man page - // - // starttime %llu (was %lu before Linux 2.6) - // (22) The time the process started after system boot. In kernels before Linux 2.6, this - // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks - // (divide by sysconf(_SC_CLK_TCK)). - // - // NOTE: - // pos 2 could contain space and is inside `(` and `)`: - // (2) comm %s - // The filename of the executable, in parentheses. - // This is visible whether or not the executable is - // swapped out. - // - // the following is an example: +func parseStat(data string) (stat Stat_t, err error) { + // From proc(5), field 2 could contain space and is inside `(` and `)`. + // The following is an example: // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 + i := strings.LastIndex(data, ")") + if i <= 2 || i >= len(data)-1 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } - // get parts after last `)`: - s := strings.Split(stat, ")") - parts := strings.Split(strings.TrimSpace(s[len(s)-1]), " ") - return parts[22-3], nil // starts at 3 (after the filename pos `2`) + parts := strings.SplitN(data[:i], "(", 2) + if len(parts) != 2 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } + + stat.Name = parts[1] + _, err = fmt.Sscanf(parts[0], "%d", &stat.PID) + if err != nil { + return stat, err + } + + // parts indexes should be offset by 3 from the field number given + // proc(5), because parts is zero-indexed and we've removed fields + // one (PID) and two (Name) in the paren-split. + parts = strings.Split(data[i+2:], " ") + var state int + fmt.Sscanf(parts[3-3], "%c", &state) + stat.State = State(state) + fmt.Sscanf(parts[22-3], "%d", &stat.StartTime) + return stat, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 8962cab331..2471535a73 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -358,8 +358,8 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( // Okay, so it's numeric. We can just roll with this. } - } else if len(groups) > 0 { - // Supplementary group ids only make sense if in the implicit form. + } else if len(groups) > 0 && uidErr != nil { + // Supplementary group ids only make sense if in the implicit form for non-numeric users. 
user.Sgids = make([]int, len(groups)) for i, group := range groups { user.Sgids[i] = group.Gid diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt new file mode 100644 index 0000000000..16249b4a1e --- /dev/null +++ b/vendor/github.com/posener/complete/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Eyal Posener + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go new file mode 100644 index 0000000000..73c356d76c --- /dev/null +++ b/vendor/github.com/posener/complete/args.go @@ -0,0 +1,75 @@ +package complete + +import ( + "os" + "path/filepath" +) + +// Args describes command line arguments +type Args struct { + // All lists of all arguments in command line (not including the command itself) + All []string + // Completed lists of all completed arguments in command line, + // If the last one is still being typed - no space after it, + // it won't appear in this list of arguments. + Completed []string + // Last argument in command line, the one being typed, if the last + // character in the command line is a space, this argument will be empty, + // otherwise this would be the last word. + Last string + // LastCompleted is the last argument that was fully typed. + // If the last character in the command line is space, this would be the + // last word, otherwise, it would be the word before that. + LastCompleted string +} + +// Directory gives the directory of the current written +// last argument if it represents a file name being written. +// in case that it is not, we fall back to the current directory. 
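As a reading aid for the Args fields documented above, a hypothetical snippet showing roughly how a partially typed command line maps onto the struct; the values are illustrative and not produced by the package itself.

```go
package main

import (
	"fmt"

	"github.com/posener/complete"
)

func main() {
	// For a command line such as "run build -o ou" (still being typed),
	// the arguments after the command name would look roughly like this:
	a := complete.Args{
		All:           []string{"build", "-o", "ou"},
		Completed:     []string{"build", "-o"},
		Last:          "ou",
		LastCompleted: "-o",
	}
	// Directory falls back to "./" unless "ou" names an existing path.
	fmt.Println(a.Directory())
}
```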
+func (a Args) Directory() string { + if info, err := os.Stat(a.Last); err == nil && info.IsDir() { + return fixPathForm(a.Last, a.Last) + } + dir := filepath.Dir(a.Last) + if info, err := os.Stat(dir); err != nil || !info.IsDir() { + return "./" + } + return fixPathForm(a.Last, dir) +} + +func newArgs(line []string) Args { + completed := removeLast(line[1:]) + return Args{ + All: line[1:], + Completed: completed, + Last: last(line), + LastCompleted: last(completed), + } +} + +func (a Args) from(i int) Args { + if i > len(a.All) { + i = len(a.All) + } + a.All = a.All[i:] + + if i > len(a.Completed) { + i = len(a.Completed) + } + a.Completed = a.Completed[i:] + return a +} + +func removeLast(a []string) []string { + if len(a) > 0 { + return a[:len(a)-1] + } + return a +} + +func last(args []string) (last string) { + if len(args) > 0 { + last = args[len(args)-1] + } + return +} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go new file mode 100644 index 0000000000..7137dee178 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/cmd.go @@ -0,0 +1,128 @@ +// Package cmd used for command line options for the complete tool +package cmd + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "github.com/posener/complete/cmd/install" +) + +// CLI for command line +type CLI struct { + Name string + InstallName string + UninstallName string + + install bool + uninstall bool + yes bool +} + +const ( + defaultInstallName = "install" + defaultUninstallName = "uninstall" +) + +// Run is used when running complete in command line mode. +// this is used when the complete is not completing words, but to +// install it or uninstall it. +func (f *CLI) Run() bool { + err := f.validate() + if err != nil { + os.Stderr.WriteString(err.Error() + "\n") + os.Exit(1) + } + + switch { + case f.install: + f.prompt() + err = install.Install(f.Name) + case f.uninstall: + f.prompt() + err = install.Uninstall(f.Name) + default: + // non of the action flags matched, + // returning false should make the real program execute + return false + } + + if err != nil { + fmt.Printf("%s failed! %s\n", f.action(), err) + os.Exit(3) + } + fmt.Println("Done!") + return true +} + +// prompt use for approval +// exit if approval was not given +func (f *CLI) prompt() { + defer fmt.Println(f.action() + "ing...") + if f.yes { + return + } + fmt.Printf("%s completion for %s? ", f.action(), f.Name) + var answer string + fmt.Scanln(&answer) + + switch strings.ToLower(answer) { + case "y", "yes": + return + default: + fmt.Println("Cancelling...") + os.Exit(1) + } +} + +// AddFlags adds the CLI flags to the flag set. +// If flags is nil, the default command line flags will be taken. +// Pass non-empty strings as installName and uninstallName to override the default +// flag names. 
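A short, hypothetical sketch of wiring the CLI type above into a program's flag handling; the flag names are the defaults described in the comments, and error handling is omitted.

```go
package main

import (
	"flag"

	"github.com/posener/complete/cmd"
)

func main() {
	cli := cmd.CLI{Name: "run"}

	// Registers -install, -uninstall and -y on the default flag set.
	cli.AddFlags(nil)
	flag.Parse()

	// Run performs the requested (un)installation and reports whether an
	// action flag was handled; otherwise the real program continues.
	if cli.Run() {
		return
	}
	// ... normal program logic would go here ...
}
```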
+func (f *CLI) AddFlags(flags *flag.FlagSet) { + if flags == nil { + flags = flag.CommandLine + } + + if f.InstallName == "" { + f.InstallName = defaultInstallName + } + if f.UninstallName == "" { + f.UninstallName = defaultUninstallName + } + + if flags.Lookup(f.InstallName) == nil { + flags.BoolVar(&f.install, f.InstallName, false, + fmt.Sprintf("Install completion for %s command", f.Name)) + } + if flags.Lookup(f.UninstallName) == nil { + flags.BoolVar(&f.uninstall, f.UninstallName, false, + fmt.Sprintf("Uninstall completion for %s command", f.Name)) + } + if flags.Lookup("y") == nil { + flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes'") + } +} + +// validate the CLI +func (f *CLI) validate() error { + if f.install && f.uninstall { + return errors.New("Install and uninstall are mutually exclusive") + } + return nil +} + +// action name according to the CLI values. +func (f *CLI) action() string { + switch { + case f.install: + return "Install" + case f.uninstall: + return "Uninstall" + default: + return "unknown" + } +} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go new file mode 100644 index 0000000000..a287f99865 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/bash.go @@ -0,0 +1,32 @@ +package install + +import "fmt" + +// (un)install in bash +// basically adds/remove from .bashrc: +// +// complete -C +type bash struct { + rc string +} + +func (b bash) Install(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if lineInFile(b.rc, completeCmd) { + return fmt.Errorf("already installed in %s", b.rc) + } + return appendToFile(b.rc, completeCmd) +} + +func (b bash) Uninstall(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if !lineInFile(b.rc, completeCmd) { + return fmt.Errorf("does not installed in %s", b.rc) + } + + return removeFromFile(b.rc, completeCmd) +} + +func (bash) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go new file mode 100644 index 0000000000..fb44b2b7e0 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/install.go @@ -0,0 +1,92 @@ +package install + +import ( + "errors" + "os" + "os/user" + "path/filepath" + + "github.com/hashicorp/go-multierror" +) + +type installer interface { + Install(cmd, bin string) error + Uninstall(cmd, bin string) error +} + +// Install complete command given: +// cmd: is the command name +func Install(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not found any shells to install") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Install(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +// Uninstall complete command given: +// cmd: is the command name +func Uninstall(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not found any shells to uninstall") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Uninstall(cmd, bin) + if errI != nil { + multierror.Append(err, errI) + } + } + + return err +} + +func installers() (i []installer) { + for _, rc := range [...]string{".bashrc", ".bash_profile"} { + if f := rcFile(rc); f != "" { + i = append(i, bash{f}) + break + } + } + if f := rcFile(".zshrc"); f 
!= "" { + i = append(i, zsh{f}) + } + return +} + +func getBinaryPath() (string, error) { + bin, err := os.Executable() + if err != nil { + return "", err + } + return filepath.Abs(bin) +} + +func rcFile(name string) string { + u, err := user.Current() + if err != nil { + return "" + } + path := filepath.Join(u.HomeDir, name) + if _, err := os.Stat(path); err != nil { + return "" + } + return path +} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go new file mode 100644 index 0000000000..2c8b44caba --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/utils.go @@ -0,0 +1,118 @@ +package install + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" +) + +func lineInFile(name string, lookFor string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + r := bufio.NewReader(f) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + return false + } + if err != nil { + return false + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) + if string(line) == lookFor { + return true + } + prefix = prefix[:0] + } +} + +func appendToFile(name string, content string) error { + f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) + return err +} + +func removeFromFile(name string, content string) error { + backup := name + ".bck" + err := copyFile(name, backup) + if err != nil { + return err + } + temp, err := removeContentToTempFile(name, content) + if err != nil { + return err + } + + err = copyFile(temp, name) + if err != nil { + return err + } + + return os.Remove(backup) +} + +func removeContentToTempFile(name, content string) (string, error) { + rf, err := os.Open(name) + if err != nil { + return "", err + } + defer rf.Close() + wf, err := ioutil.TempFile("/tmp", "complete-") + if err != nil { + return "", err + } + defer wf.Close() + + r := bufio.NewReader(rf) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return "", err + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) 
+ str := string(line) + if str == content { + continue + } + wf.WriteString(str + "\n") + prefix = prefix[:0] + } + return wf.Name(), nil +} + +func copyFile(src string, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go new file mode 100644 index 0000000000..9ece779983 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/zsh.go @@ -0,0 +1,39 @@ +package install + +import "fmt" + +// (un)install in zsh +// basically adds/remove from .zshrc: +// +// autoload -U +X bashcompinit && bashcompinit" +// complete -C +type zsh struct { + rc string +} + +func (z zsh) Install(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if lineInFile(z.rc, completeCmd) { + return fmt.Errorf("already installed in %s", z.rc) + } + + bashCompInit := "autoload -U +X bashcompinit && bashcompinit" + if !lineInFile(z.rc, bashCompInit) { + completeCmd = bashCompInit + "\n" + completeCmd + } + + return appendToFile(z.rc, completeCmd) +} + +func (z zsh) Uninstall(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if !lineInFile(z.rc, completeCmd) { + return fmt.Errorf("does not installed in %s", z.rc) + } + + return removeFromFile(z.rc, completeCmd) +} + +func (zsh) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go new file mode 100644 index 0000000000..eeeb9e0277 --- /dev/null +++ b/vendor/github.com/posener/complete/command.go @@ -0,0 +1,106 @@ +package complete + +import "github.com/posener/complete/match" + +// Command represents a command line +// It holds the data that enables auto completion of command line +// Command can also be a sub command. +type Command struct { + // Sub is map of sub commands of the current command + // The key refer to the sub command name, and the value is it's + // Command descriptive struct. + Sub Commands + + // Flags is a map of flags that the command accepts. + // The key is the flag name, and the value is it's predictions. + Flags Flags + + // GlobalFlags is a map of flags that the command accepts. + // Global flags that can appear also after a sub command. + GlobalFlags Flags + + // Args are extra arguments that the command accepts, those who are + // given without any flag before. + Args Predictor +} + +// Predict returns all possible predictions for args according to the command struct +func (c *Command) Predict(a Args) (predictions []string) { + predictions, _ = c.predict(a) + return +} + +// Commands is the type of Sub member, it maps a command name to a command struct +type Commands map[string]Command + +// Predict completion of sub command names names according to command line arguments +func (c Commands) Predict(a Args) (prediction []string) { + for sub := range c { + if match.Prefix(sub, a.Last) { + prediction = append(prediction, sub) + } + } + return +} + +// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. 
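To make the prediction walk concrete, a small sketch using the exported Command, Commands and Flags types; the results noted in the comments follow from the prefix matching shown in this file and are indicative only.

```go
package main

import (
	"fmt"

	"github.com/posener/complete"
)

func main() {
	cmd := complete.Command{
		Sub:   complete.Commands{"build": {}},
		Flags: complete.Flags{"-verbose": complete.PredictNothing},
	}

	// Candidates are filtered by prefix of the word currently being typed.
	fmt.Println(cmd.Predict(complete.Args{Last: "b"}))  // [build]
	fmt.Println(cmd.Predict(complete.Args{Last: "-v"})) // [-verbose]
}
```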
+type Flags map[string]Predictor + +// Predict completion of flags names according to command line arguments +func (f Flags) Predict(a Args) (prediction []string) { + for flag := range f { + if match.Prefix(flag, a.Last) { + prediction = append(prediction, flag) + } + } + return +} + +// predict options +// only is set to true if no more options are allowed to be returned +// those are in cases of special flag that has specific completion arguments, +// and other flags or sub commands can't come after it. +func (c *Command) predict(a Args) (options []string, only bool) { + + // search sub commands for predictions first + subCommandFound := false + for i, arg := range a.Completed { + if cmd, ok := c.Sub[arg]; ok { + subCommandFound = true + + // recursive call for sub command + options, only = cmd.predict(a.from(i)) + if only { + return + } + } + } + + // if last completed word is a global flag that we need to complete + if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to global flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.GlobalFlags.Predict(a)...) + + // if a sub command was entered, we won't add the parent command + // completions and we return here. + if subCommandFound { + return + } + + // if last completed word is a command flag that we need to complete + if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.Sub.Predict(a)...) + options = append(options, c.Flags.Predict(a)...) + if c.Args != nil { + options = append(options, c.Args.Predict(a)...) + } + + return +} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go new file mode 100644 index 0000000000..1df66170bd --- /dev/null +++ b/vendor/github.com/posener/complete/complete.go @@ -0,0 +1,86 @@ +// Package complete provides a tool for bash writing bash completion in go. +// +// Writing bash completion scripts is a hard work. This package provides an easy way +// to create bash completion scripts for any command, and also an easy way to install/uninstall +// the completion of the command. +package complete + +import ( + "flag" + "fmt" + "os" + "strings" + + "github.com/posener/complete/cmd" +) + +const ( + envComplete = "COMP_LINE" + envDebug = "COMP_DEBUG" +) + +// Complete structs define completion for a command with CLI options +type Complete struct { + Command Command + cmd.CLI +} + +// New creates a new complete command. +// name is the name of command we want to auto complete. +// IMPORTANT: it must be the same name - if the auto complete +// completes the 'go' command, name must be equal to "go". +// command is the struct of the command completion. +func New(name string, command Command) *Complete { + return &Complete{ + Command: command, + CLI: cmd.CLI{Name: name}, + } +} + +// Run runs the completion and add installation flags beforehand. +// The flags are added to the main flag CommandLine variable. +func (c *Complete) Run() bool { + c.AddFlags(nil) + flag.Parse() + return c.Complete() +} + +// Complete a command from completion line in environment variable, +// and print out the complete options. +// returns success if the completion ran or if the cli matched +// any of the given flags, false otherwise +// For installation: it assumes that flags were added and parsed before +// it was called. 
+func (c *Complete) Complete() bool { + line, ok := getLine() + if !ok { + // make sure flags parsed, + // in case they were not added in the main program + return c.CLI.Run() + } + Log("Completing line: %s", line) + + a := newArgs(line) + + options := c.Command.Predict(a) + + Log("Completion: %s", options) + output(options) + return true +} + +func getLine() ([]string, bool) { + line := os.Getenv(envComplete) + if line == "" { + return nil, false + } + return strings.Split(line, " "), true +} + +func output(options []string) { + Log("") + // stdout of program defines the complete options + for _, option := range options { + fmt.Println(option) + } +} diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go new file mode 100644 index 0000000000..797a80ced3 --- /dev/null +++ b/vendor/github.com/posener/complete/log.go @@ -0,0 +1,23 @@ +package complete + +import ( + "io" + "io/ioutil" + "log" + "os" +) + +// Log is used for debugging purposes +// since complete is running on tab completion, it is nice to +// have logs to the stderr (when writing your own completer) +// to write logs, set the COMP_DEBUG environment variable and +// use complete.Log in the complete program +var Log = getLogger() + +func getLogger() func(format string, args ...interface{}) { + var logfile io.Writer = ioutil.Discard + if os.Getenv(envDebug) != "" { + logfile = os.Stderr + } + return log.New(logfile, "complete ", log.Flags()).Printf +} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go new file mode 100644 index 0000000000..051171e8a1 --- /dev/null +++ b/vendor/github.com/posener/complete/match/file.go @@ -0,0 +1,19 @@ +package match + +import "strings" + +// File returns true if prefix can match the file +func File(file, prefix string) bool { + // special case for current directory completion + if file == "./" && (prefix == "." || prefix == "") { + return true + } + if prefix == "." && strings.HasPrefix(file, ".") { + return true + } + + file = strings.TrimPrefix(file, "./") + prefix = strings.TrimPrefix(prefix, "./") + + return strings.HasPrefix(file, prefix) +} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go new file mode 100644 index 0000000000..812fcac96b --- /dev/null +++ b/vendor/github.com/posener/complete/match/match.go @@ -0,0 +1,6 @@ +package match + +// Match matches two strings +// it is used for comparing a term to the last typed +// word, the prefix, and see if it is a possible auto complete option. 
+type Match func(term, prefix string) bool diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go new file mode 100644 index 0000000000..9a01ba63a4 --- /dev/null +++ b/vendor/github.com/posener/complete/match/prefix.go @@ -0,0 +1,9 @@ +package match + +import "strings" + +// Prefix is a simple Matcher, if the word is it's prefix, there is a match +// Match returns true if a has the prefix as prefix +func Prefix(long, prefix string) bool { + return strings.HasPrefix(long, prefix) +} diff --git a/vendor/github.com/posener/complete/metalinter.json b/vendor/github.com/posener/complete/metalinter.json new file mode 100644 index 0000000000..799c1d03f2 --- /dev/null +++ b/vendor/github.com/posener/complete/metalinter.json @@ -0,0 +1,21 @@ +{ + "Vendor": true, + "DisableAll": true, + "Enable": [ + "gofmt", + "goimports", + "interfacer", + "goconst", + "misspell", + "unconvert", + "gosimple", + "golint", + "structcheck", + "deadcode", + "vet" + ], + "Exclude": [ + "initTests is unused" + ], + "Deadline": "2m" +} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go new file mode 100644 index 0000000000..820706325b --- /dev/null +++ b/vendor/github.com/posener/complete/predict.go @@ -0,0 +1,41 @@ +package complete + +// Predictor implements a predict method, in which given +// command line arguments returns a list of options it predicts. +type Predictor interface { + Predict(Args) []string +} + +// PredictOr unions two predicate functions, so that the result predicate +// returns the union of their predication +func PredictOr(predictors ...Predictor) Predictor { + return PredictFunc(func(a Args) (prediction []string) { + for _, p := range predictors { + if p == nil { + continue + } + prediction = append(prediction, p.Predict(a)...) + } + return + }) +} + +// PredictFunc determines what terms can follow a command or a flag +// It is used for auto completion, given last - the last word in the already +// in the command line, what words can complete it. +type PredictFunc func(Args) []string + +// Predict invokes the predict function and implements the Predictor interface +func (p PredictFunc) Predict(a Args) []string { + if p == nil { + return nil + } + return p(a) +} + +// PredictNothing does not expect anything after. +var PredictNothing Predictor + +// PredictAnything expects something, but nothing particular, such as a number +// or arbitrary name. +var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go new file mode 100644 index 0000000000..c8adf7e806 --- /dev/null +++ b/vendor/github.com/posener/complete/predict_files.go @@ -0,0 +1,108 @@ +package complete + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/posener/complete/match" +) + +// PredictDirs will search for directories in the given started to be typed +// path, if no path was started to be typed, it will complete to directories +// in the current working directory. +func PredictDirs(pattern string) Predictor { + return files(pattern, false) +} + +// PredictFiles will search for files matching the given pattern in the started to +// be typed path, if no path was started to be typed, it will complete to files that +// match the pattern in the current working directory. +// To match any file, use "*" as pattern. To match go files use "*.go", and so on. 
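The matching rules in match.File and match.Prefix above can be summarized with a few hedged examples; the expected results in the comments follow directly from the code shown and assume the usual vendored import path.

```go
package main

import (
	"fmt"

	"github.com/posener/complete/match"
)

func main() {
	// Prefix is a plain strings.HasPrefix check.
	fmt.Println(match.Prefix("-verbose", "-v")) // true

	// File ignores a leading "./" on either side and special-cases the
	// current directory, so typing "." still offers "./" as a candidate.
	fmt.Println(match.File("./main.go", "ma")) // true
	fmt.Println(match.File("./", "."))         // true
	fmt.Println(match.File("main.go", "x"))    // false
}
```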
+func PredictFiles(pattern string) Predictor { + return files(pattern, true) +} + +func files(pattern string, allowFiles bool) PredictFunc { + + // search for files according to arguments, + // if only one directory has matched the result, search recursively into + // this directory to give more results. + return func(a Args) (prediction []string) { + prediction = predictFiles(a, pattern, allowFiles) + + // if the number of prediction is not 1, we either have many results or + // have no results, so we return it. + if len(prediction) != 1 { + return + } + + // only try deeper, if the one item is a directory + if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { + return + } + + a.Last = prediction[0] + return predictFiles(a, pattern, allowFiles) + } +} + +func predictFiles(a Args, pattern string, allowFiles bool) []string { + if strings.HasSuffix(a.Last, "/..") { + return nil + } + + dir := a.Directory() + files := listFiles(dir, pattern, allowFiles) + + // add dir if match + files = append(files, dir) + + return PredictFilesSet(files).Predict(a) +} + +// PredictFilesSet predict according to file rules to a given set of file names +func PredictFilesSet(files []string) PredictFunc { + return func(a Args) (prediction []string) { + // add all matching files to prediction + for _, f := range files { + f = fixPathForm(a.Last, f) + + // test matching of file to the argument + if match.File(f, a.Last) { + prediction = append(prediction, f) + } + } + return + } +} + +func listFiles(dir, pattern string, allowFiles bool) []string { + // set of all file names + m := map[string]bool{} + + // list files + if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { + for _, f := range files { + if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { + m[f] = true + } + } + } + + // list directories + if dirs, err := ioutil.ReadDir(dir); err == nil { + for _, d := range dirs { + if d.IsDir() { + m[filepath.Join(dir, d.Name())] = true + } + } + } + + list := make([]string, 0, len(m)) + for k := range m { + list = append(list, k) + } + return list +} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go new file mode 100644 index 0000000000..8fc59d7147 --- /dev/null +++ b/vendor/github.com/posener/complete/predict_set.go @@ -0,0 +1,19 @@ +package complete + +import "github.com/posener/complete/match" + +// PredictSet expects specific set of terms, given in the options argument. 
+func PredictSet(options ...string) Predictor { + return predictSet(options) +} + +type predictSet []string + +func (p predictSet) Predict(a Args) (prediction []string) { + for _, m := range p { + if match.Prefix(m, a.Last) { + prediction = append(prediction, m) + } + } + return +} diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md new file mode 100644 index 0000000000..74077e3570 --- /dev/null +++ b/vendor/github.com/posener/complete/readme.md @@ -0,0 +1,116 @@ +# complete + +[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) +[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) +[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) +[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete) + +A tool for bash writing bash completion in go. + +Writing bash completion scripts is a hard work. This package provides an easy way +to create bash completion scripts for any command, and also an easy way to install/uninstall +the completion of the command. + +## go command bash completion + +In [gocomplete](./gocomplete) there is an example for bash completion for the `go` command line. + +This is an example that uses the `complete` package on the `go` command - the `complete` package +can also be used to implement any completions, see [Usage](#usage). + +### Install + +1. Type in your shell: +``` +go get -u github.com/posener/complete/gocomplete +gocomplete -install +``` + +2. Restart your shell + +Uninstall by `gocomplete -uninstall` + +### Features + +- Complete `go` command, including sub commands and all flags. +- Complete packages names or `.go` files when necessary. +- Complete test names after `-run` flag. + +## complete package + +Supported shells: + +- [x] bash +- [x] zsh + +### Usage + +Assuming you have program called `run` and you want to have bash completion +for it, meaning, if you type `run` then space, then press the `Tab` key, +the shell will suggest relevant complete options. + +In that case, we will create a program called `runcomplete`, a go program, +with a `func main()` and so, that will make the completion of the `run` +program. Once the `runcomplete` will be in a binary form, we could +`runcomplete -install` and that will add to our shell all the bash completion +options for `run`. + +So here it is: + +```go +import "github.com/posener/complete" + +func main() { + + // create a Command object, that represents the command we want + // to complete. + run := complete.Command{ + + // Sub defines a list of sub commands of the program, + // this is recursive, since every command is of type command also. + Sub: complete.Commands{ + + // add a build sub command + "build": complete.Command { + + // define flags of the build sub command + Flags: complete.Flags{ + // build sub command has a flag '-cpus', which + // expects number of cpus after it. in that case + // anything could complete this flag. + "-cpus": complete.PredictAnything, + }, + }, + }, + + // define flags of the 'run' main command + Flags: complete.Flags{ + // a flag -o, which expects a file ending with .out after + // it, the tab completion will auto complete for files matching + // the given pattern. 
+ "-o": complete.PredictFiles("*.out"), + }, + + // define global flags of the 'run' main command + // those will show up also when a sub command was entered in the + // command line + GlobalFlags: complete.Flags{ + + // a flag '-h' which does not expects anything after it + "-h": complete.PredictNothing, + }, + } + + // run the command completion, as part of the main() function. + // this triggers the autocompletion when needed. + // name must be exactly as the binary that we want to complete. + complete.New("run", run).Run() +} +``` + +### Self completing program + +In case that the program that we want to complete is written in go we +can make it self completing. + +Here is an [example](./example/self/main.go) diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh new file mode 100755 index 0000000000..56bfcf15d1 --- /dev/null +++ b/vendor/github.com/posener/complete/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -v -race -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done \ No newline at end of file diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go new file mode 100644 index 0000000000..58b8b79277 --- /dev/null +++ b/vendor/github.com/posener/complete/utils.go @@ -0,0 +1,46 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" +) + +// fixPathForm changes a file name to a relative name +func fixPathForm(last string, file string) string { + // get wording directory for relative name + workDir, err := os.Getwd() + if err != nil { + return file + } + + abs, err := filepath.Abs(file) + if err != nil { + return file + } + + // if last is absolute, return path as absolute + if filepath.IsAbs(last) { + return fixDirPath(abs) + } + + rel, err := filepath.Rel(workDir, abs) + if err != nil { + return file + } + + // fix ./ prefix of path + if rel != "." && strings.HasPrefix(last, ".") { + rel = "./" + rel + } + + return fixDirPath(rel) +} + +func fixDirPath(path string) string { + info, err := os.Stat(path) + if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { + path += "/" + } + return path +} diff --git a/vendor/github.com/ryanuber/columnize/README.md b/vendor/github.com/ryanuber/columnize/README.md index e47634fc68..9a583db8db 100644 --- a/vendor/github.com/ryanuber/columnize/README.md +++ b/vendor/github.com/ryanuber/columnize/README.md @@ -56,12 +56,14 @@ config.Delim = "|" config.Glue = " " config.Prefix = "" config.Empty = "" +config.NoTrim = false ``` * `Delim` is the string by which columns of **input** are delimited * `Glue` is the string by which columns of **output** are delimited * `Prefix` is a string by which each line of **output** is prefixed * `Empty` is a string used to replace blank values found in output +* `NoTrim` is a boolean used to disable the automatic trimming of input values You can then pass the `Config` in using the `Format` method (signature below) to have text formatted to your liking. diff --git a/vendor/github.com/ryanuber/columnize/columnize.go b/vendor/github.com/ryanuber/columnize/columnize.go index bff014fac7..527c1d1b36 100644 --- a/vendor/github.com/ryanuber/columnize/columnize.go +++ b/vendor/github.com/ryanuber/columnize/columnize.go @@ -18,8 +18,11 @@ type Config struct { // The string by which columns of output will be prefixed. 
Prefix string - // A replacement string to replace empty fields + // A replacement string to replace empty fields. Empty string + + // NoTrim disables automatic trimming of inputs. + NoTrim bool } // DefaultConfig returns a *Config with default values. @@ -29,18 +32,22 @@ func DefaultConfig() *Config { Glue: " ", Prefix: "", Empty: "", + NoTrim: false, } } // MergeConfig merges two config objects together and returns the resulting // configuration. Values from the right take precedence over the left side. func MergeConfig(a, b *Config) *Config { - var result Config = *a - // Return quickly if either side was nil - if a == nil || b == nil { - return &result + if a == nil { + return b } + if b == nil { + return a + } + + var result Config = *a if b.Delim != "" { result.Delim = b.Delim @@ -54,6 +61,9 @@ func MergeConfig(a, b *Config) *Config { if b.Empty != "" { result.Empty = b.Empty } + if b.NoTrim { + result.NoTrim = true + } return &result } @@ -86,7 +96,10 @@ func elementsFromLine(config *Config, line string) []interface{} { separated := strings.Split(line, config.Delim) elements := make([]interface{}, len(separated)) for i, field := range separated { - value := strings.TrimSpace(field) + value := field + if !config.NoTrim { + value = strings.TrimSpace(field) + } // Apply the empty value, if configured. if value == "" && config.Empty != "" { diff --git a/vendor/github.com/sethgrid/pester/main.go b/vendor/github.com/sethgrid/pester/main.go index 4a697919b4..55db6fa39f 100644 --- a/vendor/github.com/sethgrid/pester/main.go +++ b/vendor/github.com/sethgrid/pester/main.go @@ -109,8 +109,8 @@ func NewExtendedClient(hc *http.Client) *Client { return c } -// PrintErrStrategy is used to log attempts as they happen. -// You know, more visible +// LogHook is used to log attempts as they happen. This function is never called, +// however, if KeepLog is set to true. 
type LogHook func(e ErrEntry) // BackoffStrategy is used to determine how long a retry request should wait until attempted diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000000..c443aed097 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,109 @@ +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/LICENSE.md b/vendor/github.com/sirupsen/logrus/LICENSE similarity index 96% rename from vendor/github.com/hashicorp/net-rpc-msgpackrpc/LICENSE.md rename to vendor/github.com/sirupsen/logrus/LICENSE index dfe3850392..f090cb42f3 100644 --- a/vendor/github.com/hashicorp/net-rpc-msgpackrpc/LICENSE.md +++ b/vendor/github.com/sirupsen/logrus/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013 HashiCorp, Inc. 
+Copyright (c) 2014 Simon Eskildsen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 0000000000..82aeb4eef3 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,504 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +**Are you interested in assisting in maintaining Logrus?** Currently I have a +lot of obligations, and I am unable to provide Logrus with the maintainership it +needs. If you'd like to help, please reach out to me at `simon at author's +username dot com`. + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. 
We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. 
| +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. 
| +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. | +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. 
For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. 
Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers; you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with a different config in each environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import ( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t *testing.T) { + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs `os.Exit(1)`. This behavior may be helpful if callers need +to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, the Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situations where locking is not needed include: + +* You have no hooks registered, or hook calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000000..8af90637a9 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka .
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 0000000000..da67aba06d --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 0000000000..5bf582ef27 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,276 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. 
It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + serialized, err := entry.Logger.Formatter.Format(&entry) + entry.Buffer = nil + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } else { + entry.Logger.mu.Lock() + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + entry.Logger.mu.Unlock() + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) 
+} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 0000000000..013183edab --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.level() +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) 
+} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 0000000000..b5fbe934d1 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. 
+func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 0000000000..3f151cdc39 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000..e787ea1750 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string +type FieldMap map[fieldKey]string + +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for various fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 0000000000..b44966f973 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,317 @@ +package logrus + +import ( + "io" + "os" + "sync" + "sync/atomic" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. 
+func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 0000000000..dd38999741 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. 
+func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_appengine.go new file mode 100644 index 0000000000..e011a86945 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_appengine.go @@ -0,0 +1,10 @@ +// +build appengine + +package logrus + +import "io" + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go new file mode 100644 index 0000000000..5f6be4d3c0 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000000..308160ca80 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 0000000000..190297abf3 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,28 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
+func IsTerminal(f io.Writer) bool { + var termios Termios + switch v := f.(type) { + case *os.File: + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_solaris.go new file mode 100644 index 0000000000..3c86b1abee --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,21 @@ +// +build solaris,!appengine + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go new file mode 100644 index 0000000000..7a336307e5 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go @@ -0,0 +1,82 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!appengine + +package logrus + +import ( + "bytes" + "errors" + "io" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") +) + +const ( + enableProcessedOutput = 0x0001 + enableWrapAtEolOutput = 0x0002 + enableVirtualTerminalProcessing = 0x0004 +) + +func getVersion() (float64, error) { + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + cmd := exec.Command("cmd", "ver") + cmd.Stdout = stdout + cmd.Stderr = stderr + err := cmd.Run() + if err != nil { + return -1, err + } + + // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]" + version := strings.Replace(stdout.String(), "\n", "", -1) + version = strings.Replace(version, "\r\n", "", -1) + + x1 := strings.Index(version, "[Version") + + if x1 == -1 || strings.Index(version, "]") == -1 { + return -1, errors.New("Can't determine Windows version") + } + + return strconv.ParseFloat(version[x1+9:x1+13], 64) +} + +func init() { + ver, err := getVersion() + if err != nil { + return + } + + // Activate Virtual Processing for Windows CMD + // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + if ver >= 10 { + handle := syscall.Handle(os.Stderr.Fd()) + procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing) + } +} + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
+func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000000..cf3f17f2f8 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,175 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 36 + gray = 37 +) + +var ( + baseTimestamp time.Time +) + +func init() { + baseTimestamp = time.Now() +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = IsTerminal(entry.Logger.Out) + } +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + } 
else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 0000000000..7bdebedc60 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,62 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go index bdb2d5f19d..618c51505f 100644 --- a/vendor/github.com/ugorji/go/codec/binc.go +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -356,6 +356,9 @@ func (d *bincDecDriver) uncacheRead() { } func (d *bincDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } if d.vd == bincVdSpecial && d.vs == bincSpNil { return valueTypeNil } else if d.vd == bincVdByteArray { @@ -580,6 +583,9 @@ func (d *bincDecDriver) DecodeBool() (b bool) { } func (d *bincDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } if d.vd != bincVdMap { d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) return @@ -590,6 +596,9 @@ func (d *bincDecDriver) ReadMapStart() (length int) { } func (d *bincDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } if d.vd != bincVdArray { d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) return diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go index ce3e0e043f..4c72caffb2 100644 --- a/vendor/github.com/ugorji/go/codec/cbor.go +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -196,6 +196,9 @@ func (d *cborDecDriver) uncacheRead() { } func (d *cborDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } if d.bd == cborBdNil { return valueTypeNil } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { @@ -351,6 +354,9 @@ func (d *cborDecDriver) DecodeBool() (b bool) { } func (d *cborDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } d.bdRead = false if d.bd == cborBdIndefiniteMap { return -1 @@ -359,6 +365,9 @@ func (d *cborDecDriver) ReadMapStart() (length int) { } func (d *cborDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } d.bdRead = false if d.bd == cborBdIndefiniteArray { return -1 diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go index cbe2df48a3..2563668ba7 100644 --- a/vendor/github.com/ugorji/go/codec/decode.go +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -161,7 +161,9 @@ type DecodeOptions struct { // look them up from a map (than to allocate them afresh). // // Note: Handles will be smart when using the intern functionality. - // So everything will not be interned. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. InternString bool // PreferArrayOverSlice controls whether to decode to an array or a slice. @@ -740,7 +742,8 @@ func (f *decFnInfo) kStruct(rv reflect.Value) { if cr != nil { cr.sendContainerState(containerMapKey) } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + rvkencnameB := dd.DecodeBytes(f.d.b[:], true, true) + rvkencname := stringView(rvkencnameB) // rvksi := ti.getForEncName(rvkencname) if cr != nil { cr.sendContainerState(containerMapValue) @@ -755,6 +758,7 @@ func (f *decFnInfo) kStruct(rv reflect.Value) { } else { d.structFieldNotFound(-1, rvkencname) } + keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView } } else { for j := 0; !dd.CheckBreak(); j++ { @@ -762,7 +766,8 @@ func (f *decFnInfo) kStruct(rv reflect.Value) { if cr != nil { cr.sendContainerState(containerMapKey) } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + rvkencnameB := dd.DecodeBytes(f.d.b[:], true, true) + rvkencname := stringView(rvkencnameB) // rvksi := ti.getForEncName(rvkencname) if cr != nil { cr.sendContainerState(containerMapValue) @@ -777,6 +782,7 @@ func (f *decFnInfo) kStruct(rv reflect.Value) { } else { d.structFieldNotFound(-1, rvkencname) } + keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView } } if cr != nil { @@ -1873,11 +1879,14 @@ func (d *Decoder) errorf(format string, params ...interface{}) { panic(err) } +// Possibly get an interned version of a string +// +// This should mostly be used for map keys, where the key type is string func (d *Decoder) string(v []byte) (s string) { if d.is != nil { - s, ok := d.is[string(v)] // no allocation here. 
+ s, ok := d.is[string(v)] // no allocation here, per go implementation if !ok { - s = string(v) + s = string(v) // new allocation here d.is[s] = s } return s @@ -1885,11 +1894,11 @@ func (d *Decoder) string(v []byte) (s string) { return string(v) // don't return stringView, as we need a real string here. } -func (d *Decoder) intern(s string) { - if d.is != nil { - d.is[s] = s - } -} +// func (d *Decoder) intern(s string) { +// if d.is != nil { +// d.is[s] = s +// } +// } // nextValueBytes returns the next value in the stream as a set of bytes. func (d *Decoder) nextValueBytes() []byte { diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go index c2cef812ec..268154d24c 100644 --- a/vendor/github.com/ugorji/go/codec/encode.go +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -1027,6 +1027,8 @@ func (e *Encoder) ResetBytes(out *[]byte) { // However, struct values may encode as arrays. This happens when: // - StructToArray Encode option is set, OR // - the tag on the _struct field sets the "toarray" option +// Note that omitempty is ignored when encoding struct values as arrays, +// as an entry must be encoded for each field, to maintain its position. // // Values with types that implement MapBySlice are encoded as stream maps. // @@ -1053,8 +1055,7 @@ func (e *Encoder) ResetBytes(out *[]byte) { // } // // type MyStruct struct { -// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field -// //and encode struct as an array +// _struct bool `codec:",toarray"` //encode struct as an array // } // // The mode of encoding is based on the type of the value. When a value is seen: diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go index c4944dbff3..da66921a66 100644 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -1653,15 +1653,8 @@ func (x *genV) MethodNamePfx(prefix string, prim bool) string { func genImportPath(t reflect.Type) (s string) { s = t.PkgPath() if genCheckVendor { - // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. - // if s contains /vendor/ OR startsWith vendor/, then return everything after it. - const vendorStart = "vendor/" - const vendorInline = "/vendor/" - if i := strings.LastIndex(s, vendorInline); i >= 0 { - s = s[i+len(vendorInline):] - } else if strings.HasPrefix(s, vendorStart) { - s = s[len(vendorStart):] - } + // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7 + s = stripVendor(s) } return } @@ -1884,6 +1877,19 @@ func genInternalSortType(s string, elem bool) string { panic("sorttype: unexpected type: " + s) } +func stripVendor(s string) string { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
+ const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} + // var genInternalMu sync.Mutex var genInternalV genInternal var genInternalTmplFuncs template.FuncMap diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go index 8b06a00459..f254b98860 100644 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -8,6 +8,9 @@ package codec // stringView returns a view of the []byte as a string. // In unsafe mode, it doesn't incur allocation and copying caused by conversion. // In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. func stringView(v []byte) string { return string(v) } @@ -15,6 +18,19 @@ func stringView(v []byte) string { // bytesView returns a view of the string as a []byte. // In unsafe mode, it doesn't incur allocation and copying caused by conversion. // In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. func bytesView(v string) []byte { return []byte(v) } + +// keepAlive4BytesView maintains a reference to the input parameter for bytesView. +// +// Usage: call this at point where done with the bytes view. +func keepAlive4BytesView(v string) {} + +// keepAlive4BytesView maintains a reference to the input parameter for stringView. +// +// Usage: call this at point where done with the string view. +func keepAlive4StringView(v []byte) {} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go index 0f596c71aa..6c146f77cd 100644 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -6,10 +6,12 @@ package codec import ( + "runtime" "unsafe" ) // This file has unsafe variants of some helper methods. +// NOTE: See helper_not_unsafe.go for the usage information. type unsafeString struct { Data uintptr @@ -22,9 +24,6 @@ type unsafeSlice struct { Cap int } -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. func stringView(v []byte) string { if len(v) == 0 { return "" @@ -35,9 +34,6 @@ func stringView(v []byte) string { return *(*string)(unsafe.Pointer(&sx)) } -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. 
func bytesView(v string) []byte { if len(v) == 0 { return zeroByteSlice @@ -47,3 +43,11 @@ func bytesView(v string) []byte { bx := unsafeSlice{sx.Data, sx.Len, sx.Len} return *(*[]byte)(unsafe.Pointer(&bx)) } + +func keepAlive4BytesView(v string) { + runtime.KeepAlive(v) +} + +func keepAlive4StringView(v []byte) { + runtime.KeepAlive(v) +} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go index 4907c6497d..7309769739 100644 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -569,6 +569,9 @@ func (d *msgpackDecDriver) uncacheRead() { } func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } bd := d.bd if bd == mpNil { return valueTypeNil @@ -621,10 +624,16 @@ func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) } func (d *msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } return d.readContainerLen(msgpackContainerMap) } func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } return d.readContainerLen(msgpackContainerList) } diff --git a/vendor/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/ugorji/go/codec/prebuild.sh index 909f4bb0f2..422c1a7342 100755 --- a/vendor/github.com/ugorji/go/codec/prebuild.sh +++ b/vendor/github.com/ugorji/go/codec/prebuild.sh @@ -38,7 +38,7 @@ _build() { return 0 fi - # echo "Running prebuild" + # echo "Running prebuild" if [ "${zbak}" == "1" ] then # echo "Backing up old generated files" @@ -46,12 +46,13 @@ _build() { _gg=".generated.go" [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak + [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak - else - rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go - fi + fi + rm -f gen-helper.generated.go fast-path.generated.go \ + gen.generated.go \ + *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go cat > gen.generated.go <= 20 { + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. return nil, fmt.Errorf("ssh: illegal padding %d", padding) } diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index c87fbebfde..f91c2770ed 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -383,8 +383,8 @@ func init() { // 4253 and Oakley Group 2 in RFC 2409. 
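// Illustrative sketch (not part of the diff): the keepAlive4BytesView /
// keepAlive4StringView helpers above exist so callers of the unsafe views can
// pin the original value until they are done with the alias. byteView below is
// a stand-in written only for this example (not the package's own bytesView);
// the pattern is what matters: alias via unsafe, then runtime.KeepAlive the
// source once the view is no longer needed.
package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// byteView aliases the bytes of s without copying. The result must not be
// mutated, and s must stay reachable while the view is in use.
func byteView(s string) (b []byte) {
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = sh.Data
	bh.Len = sh.Len
	bh.Cap = sh.Len
	return b
}

func main() {
	s := "no copy"
	view := byteView(s)
	fmt.Println(len(view), view[0] == 'n')
	// Done with the view; keep s alive up to this point.
	runtime.KeepAlive(s)
}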
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, + g: new(big.Int).SetInt64(2), + p: p, pMinus1: new(big.Int).Sub(p, bigOne), } @@ -393,8 +393,8 @@ func init() { p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, + g: new(big.Int).SetInt64(2), + p: p, pMinus1: new(big.Int).Sub(p, bigOne), } diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 4c8b1a8f7d..7a8756a933 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -802,6 +802,9 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { } } +// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with +// passphrase from a PEM encoded private key. If wrong passphrase, return +// x509.IncorrectPasswordError. func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { block, _ := pem.Decode(pemBytes) if block == nil { @@ -814,6 +817,9 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, var err error buf, err = x509.DecryptPEMBlock(block, passPhrase) if err != nil { + if err == x509.IncorrectPasswordError { + return nil, err + } return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) } } diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 23b41d943f..b6f4cc8111 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -14,23 +14,34 @@ import ( ) // The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a -// user. Permissions, except for "source-address", must be enforced in -// the server application layer, after successful authentication. The -// Permissions are passed on in ServerConn so a server implementation -// can honor them. +// specific to a user or a specific authentication method for a user. +// The Permissions value for a successful authentication attempt is +// available in ServerConn, so it can be used to pass information from +// the user-authentication phase to the application layer. type Permissions struct { - // Critical options restrict default permissions. Common - // restrictions are "source-address" and "force-command". If - // the server cannot enforce the restriction, or does not - // recognize it, the user should not authenticate. + // CriticalOptions indicate restrictions to the default + // permissions, and are typically used in conjunction with + // user certificates. 
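// Usage sketch (not part of the diff) for the ParseRawPrivateKeyWithPassphrase
// documentation added above: a wrong passphrase can be distinguished from
// other failures via x509.IncorrectPasswordError, which the function now
// returns unchanged. The key path and passphrase are placeholders.
package main

import (
	"crypto/x509"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := ioutil.ReadFile("id_rsa") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	key, err := ssh.ParseRawPrivateKeyWithPassphrase(pemBytes, []byte("passphrase"))
	if err == x509.IncorrectPasswordError {
		log.Fatal("wrong passphrase")
	}
	if err != nil {
		log.Fatalf("cannot parse key: %v", err)
	}
	log.Printf("parsed a %T", key)
}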
The standard for SSH certificates + // defines "force-command" (only allow the given command to + // execute) and "source-address" (only allow connections from + // the given address). The SSH package currently only enforces + // the "source-address" critical option. It is up to server + // implementations to enforce other critical options, such as + // "force-command", by checking them after the SSH handshake + // is successful. In general, SSH servers should reject + // connections that specify critical options that are unknown + // or not supported. CriticalOptions map[string]string // Extensions are extra functionality that the server may - // offer on authenticated connections. Common extensions are - // "permit-agent-forwarding", "permit-X11-forwarding". Lack of - // support for an extension does not preclude authenticating a - // user. + // offer on authenticated connections. Lack of support for an + // extension does not preclude authenticating a user. Common + // extensions are "permit-agent-forwarding", + // "permit-X11-forwarding". The Go SSH library currently does + // not act on any extension, and it is up to server + // implementations to honor them. Extensions can be used to + // pass data from the authentication callbacks to the server + // application layer. Extensions map[string]string } @@ -55,9 +66,14 @@ type ServerConfig struct { // attempts to authenticate using a password. PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - // PublicKeyCallback, if non-nil, is called when a client attempts public - // key authentication. It must return true if the given public key is - // valid for the given user. For example, see CertChecker.Authenticate. + // PublicKeyCallback, if non-nil, is called when a client + // offers a public key for authentication. It must return true + // if the given public key can be used to authenticate the + // given user. For example, see CertChecker.Authenticate. A + // call to this function does not guarantee that the key + // offered is in fact used to authenticate. To record any data + // depending on the public key, store it inside a + // Permissions.Extensions entry. PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) // KeyboardInteractiveCallback, if non-nil, is called when @@ -272,12 +288,30 @@ func checkSourceAddress(addr net.Addr, sourceAddrs string) error { return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) } +// ServerAuthError implements the error interface. It appends any authentication +// errors that may occur, and is returned if all of the authentication methods +// provided by the user failed to authenticate. +type ServerAuthError struct { + // Errors contains authentication errors returned by the authentication + // callback methods. 
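// Illustrative sketch (not part of the diff) of the pattern the rewritten
// PublicKeyCallback documentation above recommends: record data derived from
// the offered key in Permissions.Extensions so the application layer can read
// it from ServerConn after the handshake. The "pubkey-fp" extension name and
// the authorized map are assumptions made for this example.
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func newServerConfig(authorized map[string]bool) *ssh.ServerConfig {
	return &ssh.ServerConfig{
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			fp := ssh.FingerprintSHA256(key)
			if !authorized[fp] {
				return nil, fmt.Errorf("unknown public key for %q", conn.User())
			}
			// Stash the fingerprint for the application layer.
			return &ssh.Permissions{
				Extensions: map[string]string{"pubkey-fp": fp},
			}, nil
		},
	}
}

func main() {
	cfg := newServerConfig(map[string]bool{})
	fmt.Println(cfg.PublicKeyCallback != nil) // a host key must still be added before use
}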
+ Errors []error +} + +func (l ServerAuthError) Error() string { + var errs []string + for _, err := range l.Errors { + errs = append(errs, err.Error()) + } + return "[" + strings.Join(errs, ", ") + "]" +} + func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions authFailures := 0 + var authErrs []error userAuthLoop: for { @@ -296,6 +330,9 @@ userAuthLoop: var userAuthReq userAuthRequestMsg if packet, err := s.transport.readPacket(); err != nil { + if err == io.EOF { + return nil, &ServerAuthError{Errors: authErrs} + } return nil, err } else if err = Unmarshal(packet, &userAuthReq); err != nil { return nil, err @@ -432,6 +469,8 @@ userAuthLoop: authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) } + authErrs = append(authErrs, authErr) + if config.AuthLogCallback != nil { config.AuthLogCallback(s, userAuthReq.Method, authErr) } diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index f143ed6a1e..d3681ab428 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -36,103 +36,6 @@ // Contexts. package context // import "golang.org/x/net/context" -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. 
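// Usage sketch (not part of the diff) for the new ServerAuthError above: when
// the handshake ends with every offered method rejected, the returned error
// can be inspected to log each per-attempt failure. The listen address is a
// placeholder and the host key setup is omitted.
package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ServerConfig{
		PasswordCallback: func(conn ssh.ConnMetadata, pw []byte) (*ssh.Permissions, error) {
			return nil, fmt.Errorf("password rejected for %q", conn.User())
		},
	}
	// config.AddHostKey(hostSigner) // required in a real server; omitted here

	ln, err := net.Listen("tcp", "127.0.0.1:2022")
	if err != nil {
		log.Fatal(err)
	}
	nConn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	_, _, _, err = ssh.NewServerConn(nConn, config)
	if authErr, ok := err.(*ssh.ServerAuthError); ok {
		for _, attemptErr := range authErr.Errors {
			log.Printf("auth attempt failed: %v", attemptErr)
		}
	} else if err != nil {
		log.Printf("handshake failed: %v", err)
	}
}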
Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming @@ -149,8 +52,3 @@ func Background() Context { func TODO() Context { return todo } - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000000..d88bd1db12 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000000..b105f80be4 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. 
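// Illustrative sketch (not part of the diff) of what the new go19.go file
// above changes: built with Go 1.9, the x/net/context names are aliases for
// the standard library's, so a value from the vendored package satisfies an
// API declared against "context" with no adapter.
package main

import (
	stdctx "context"
	"fmt"

	netctx "golang.org/x/net/context"
)

// waitOrCancel is written against the standard library interface.
func waitOrCancel(ctx stdctx.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func main() {
	// With the type alias in place, no conversion is needed here.
	fmt.Println(waitOrCancel(netctx.Background()))
}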
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. 
+// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go index 73cc2381f3..4f30d228a8 100644 --- a/vendor/golang.org/x/net/http2/go18.go +++ b/vendor/golang.org/x/net/http2/go18.go @@ -52,3 +52,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { func reqBodyIsNoBody(body io.ReadCloser) bool { return body == http.NoBody } + +func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go index efbf83c32c..6f8d3f86fa 100644 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -25,3 +25,5 @@ func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { } func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 7367b31c57..eae143ddff 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2252,6 +2252,7 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2333,6 +2334,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { + rws.dirty = true return 0, err } if endStream { @@ -2354,6 +2356,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true return 0, err } } @@ -2365,6 +2368,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) + if err != nil { + rws.dirty = true + } return len(p), err } return len(p), nil @@ -2504,7 +2510,7 @@ func cloneHeader(h http.Header) http.Header { // // * Handler calls w.Write or w.WriteString -> // * -> rws.bw (*bufio.Writer) -> -// * (Handler migth call Flush) +// * (Handler might call Flush) // * -> chunkWriter{rws} // * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk (most of the magic; see comment there) @@ -2543,10 +2549,19 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws + dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - responseWriterStatePool.Put(rws) + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } } // Push errors. 
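// Illustrative sketch (not part of the diff) of the design choice behind the
// new responseWriterState.dirty flag above, restated with a generic sync.Pool:
// once a failed hand-off means another goroutine might still reference the
// object, it is safer to let the GC reclaim it than to recycle it.
package main

import (
	"fmt"
	"sync"
)

type scratch struct {
	buf   []byte
	dirty bool // a write failed; someone else may still hold buf
}

var scratchPool = sync.Pool{New: func() interface{} { return new(scratch) }}

func handle(writeFails bool) {
	s := scratchPool.Get().(*scratch)
	s.dirty = false
	s.buf = append(s.buf[:0], "payload"...)
	if writeFails {
		s.dirty = true // simulate a write that returned early
	}
	if !s.dirty {
		scratchPool.Put(s) // safe: no outstanding references remain
	}
	// If dirty, simply drop s; the garbage collector will reclaim it.
}

func main() {
	handle(false)
	handle(true)
	fmt.Println("ok")
}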
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 3a85f25a20..850d7ae09c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -694,7 +694,7 @@ func checkConnHeaders(req *http.Request) error { // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. func actualContentLength(req *http.Request) int64 { - if req.Body == nil { + if req.Body == nil || reqBodyIsNoBody(req.Body) { return 0 } if req.ContentLength != 0 { @@ -725,8 +725,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { } body := req.Body - hasBody := body != nil contentLen := actualContentLength(req) + hasBody := contentLen != 0 // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? var requestedGzip bool @@ -1713,16 +1713,27 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } // Return any padded flow control now, since we won't // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - cs.inflow.add(pad) - cc.inflow.add(pad) + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(pad)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } cc.bw.Flush() cc.wmu.Unlock() } - didReset := cs.didReset cc.mu.Unlock() if len(data) > 0 && !didReset { diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go index ee2dbda6d0..eb2473507b 100644 --- a/vendor/golang.org/x/net/idna/idna.go +++ b/vendor/golang.org/x/net/idna/idna.go @@ -67,6 +67,15 @@ func VerifyDNSLength(verify bool) Option { return func(o *options) { o.verifyDNSLength = verify } } +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + // ValidateLabels sets whether to check the mandatory label validation criteria // as defined in Section 5.4 of RFC 5891. This includes testing for correct use // of hyphens ('-'), normalization, validity of runes, and the context rules. 
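// Usage sketch (not part of the diff) for the actualContentLength change
// above: passing http.NoBody (Go 1.8+) instead of nil marks the request as
// having an explicitly empty body, which the patched http2 transport now
// treats as "no body" when deciding whether to open a request body stream.
// The URL is a placeholder.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("PUT", "https://example.com/upload", http.NoBody)
	if err != nil {
		panic(err)
	}
	// ContentLength stays 0, and with this patch the http2 client can end the
	// stream on the HEADERS frame instead of sending an empty DATA frame.
	fmt.Println(req.ContentLength, req.Body != nil)
}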
@@ -133,14 +142,16 @@ func MapForLookup() Option { o.mapping = validateAndMap StrictDomainName(true)(o) ValidateLabels(true)(o) + RemoveLeadingDots(true)(o) } } type options struct { - transitional bool - useSTD3Rules bool - validateLabels bool - verifyDNSLength bool + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool trie *idnaTrie @@ -240,21 +251,23 @@ var ( punycode = &Profile{} lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + transitional: true, + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} registration = &Profile{options{ useSTD3Rules: true, @@ -293,7 +306,9 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { s, err = p.mapping(p, s) } // Remove leading empty labels. - for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } } // It seems like we should only create this error on ToASCII, but the // UTS 46 conformance tests suggests we should always check this. @@ -373,23 +388,20 @@ func validateRegistration(p *Profile, s string) (string, error) { if !norm.NFC.IsNormalString(s) { return s, &labelError{s, "V1"} } - var err error for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) - i += sz // Copy bytes not copied so far. switch p.simplify(info(v).category()) { // TODO: handle the NV8 defined in the Unicode idna data set to allow // for strict conformance to IDNA2008. case valid, deviation: case disallowed, mapped, unknown, ignored: - if err == nil { - r, _ := utf8.DecodeRuneInString(s[i:]) - err = runeError(r) - } + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, runeError(r) } + i += sz } - return s, err + return s, nil } func validateAndMap(p *Profile, s string) (string, error) { @@ -408,7 +420,7 @@ func validateAndMap(p *Profile, s string) (string, error) { continue case disallowed: if err == nil { - r, _ := utf8.DecodeRuneInString(s[i:]) + r, _ := utf8.DecodeRuneInString(s[start:]) err = runeError(r) } continue diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index d8daec1a79..c646a6952e 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -39,9 +39,9 @@ var buckets = []bucket{ } // RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. 
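// Usage sketch (not part of the diff) for the RemoveLeadingDots option wired
// into the lookup and display profiles above. The input starts with U+3002
// (ideographic full stop), which maps to a label separator; the output noted
// in the comment is the expectation under this patch, not a verified run.
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// The package-level Lookup profile now strips leading separators before
	// validation.
	ascii, err := idna.Lookup.ToASCII("。example.com")
	fmt.Println(ascii, err) // expected: example.com <nil>

	// A custom profile can opt back out of the new behaviour.
	strict := idna.New(idna.MapForLookup(), idna.RemoveLeadingDots(false))
	ascii, err = strict.ToASCII("。example.com")
	fmt.Println(ascii, err)
}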
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { now := time.Now() data := &struct { diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go index 3d9b646111..bb72a527e8 100644 --- a/vendor/golang.org/x/net/trace/trace.go +++ b/vendor/golang.org/x/net/trace/trace.go @@ -110,30 +110,46 @@ var AuthRequest = func(req *http.Request) (any, sensitive bool) { } func init() { - http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) - }) - http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) - }) + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) } // Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. 
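// Usage sketch (not part of the diff) for the newly exported Traces and
// Events handlers above: serve them from a private mux rather than
// http.DefaultServeMux, with a permissive AuthRequest for local debugging.
// The listen address is a placeholder.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	// The default AuthRequest only trusts loopback clients; override it
	// explicitly if the debug mux must be reachable from elsewhere.
	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
		return true, false
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/debug/requests", trace.Traces)
	mux.HandleFunc("/debug/events", trace.Events)
	log.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))
}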
func Render(w io.Writer, req *http.Request, sensitive bool) { data := &struct { Families []string diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 7cce374caa..0487c81b5a 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "time" "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" ) // Token represents the crendentials used to authorize @@ -105,6 +106,7 @@ var brokenAuthHeaderProviders = []string{ "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214 "https://login.microsoftonline.com/", "https://login.salesforce.com/", + "https://login.windows.net", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", "https://oauth.vk.com/", @@ -188,7 +190,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, if !bustedAuth { req.SetBasicAuth(clientID, clientSecret) } - r, err := hc.Do(req) + r, err := ctxhttp.Do(ctx, hc, req) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d56eb43a2a..5cb4d8b59e 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -153,6 +153,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -165,11 +166,14 @@ struct ltchars { #include #include #include +#include #include #include #include #include #include +#include +#include #include #include #include @@ -374,7 +378,7 @@ ccflags="$@" $2 == "IFNAMSIZ" || $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ || $2 ~ /^SYSCTL_VERS/ || - $2 ~ /^(MS|MNT)_/ || + $2 ~ /^(MS|MNT|UMOUNT)_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|E?FD|NAME|S|PTRACE|PT)_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || @@ -402,8 +406,11 @@ ccflags="$@" $2 ~ /^GRND_/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || + $2 ~ /^PERF_EVENT_IOC_/ || + $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SPLICE_/ || $2 ~ /^(VM|VMADDR)_/ || + $2 ~ /^XATTR_(CREATE|REPLACE)/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 77f9c54e63..a6ac8e92b2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -36,6 +36,20 @@ func Creat(path string, mode uint32) (fd int, err error) { return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) } +//sys fchmodat(dirfd int, path string, mode uint32) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior + // and check the flags. Otherwise the mode would be applied to the symlink + // destination which is not what the user expects. 
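// Illustrative sketch (not part of the diff) of the ctxhttp.Do call the
// oauth2 token exchange above switched to: the outbound request is abandoned
// as soon as the context is cancelled or its deadline passes. The URL and
// timeout are placeholders.
package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "https://example.com/token", nil)
	if err != nil {
		panic(err)
	}
	resp, err := ctxhttp.Do(ctx, http.DefaultClient, req)
	if err != nil {
		fmt.Println("request failed or timed out:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}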
+ if flags&^AT_SYMLINK_NOFOLLOW != 0 { + return EINVAL + } else if flags&AT_SYMLINK_NOFOLLOW != 0 { + return EOPNOTSUPP + } + return fchmodat(dirfd, path, mode) +} + //sys ioctl(fd int, req uint, arg uintptr) (err error) // ioctl itself should not be exposed directly, but additional get/set @@ -47,6 +61,10 @@ func IoctlSetInt(fd int, req uint, value int) (err error) { return ioctl(fd, req, uintptr(value)) } +func IoctlSetTermios(fd int, req uint, value *Termios) (err error) { + return ioctl(fd, req, uintptr(unsafe.Pointer(value))) +} + // IoctlGetInt performs an ioctl operation which gets an integer value // from fd, using the specified request number. func IoctlGetInt(fd int, req uint) (int, error) { @@ -55,6 +73,12 @@ func IoctlGetInt(fd int, req uint) (int, error) { return value, err } +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { @@ -318,10 +342,14 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, return } -func Mkfifo(path string, mode uint32) (err error) { +func Mkfifo(path string, mode uint32) error { return Mknod(path, mode|S_IFIFO, 0) } +func Mkfifoat(dirfd int, path string, mode uint32) error { + return Mknodat(dirfd, path, mode|S_IFIFO, 0) +} + func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -1177,7 +1205,6 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) -//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e8d945ecf1..a6b3b5f143 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1393,6 +1403,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1400,6 +1413,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1407,7 +1430,9 @@ const 
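// Usage sketch (not part of the diff) for the new IoctlGetTermios and
// IoctlSetTermios helpers above: temporarily disable terminal echo on stdin,
// then restore the saved state. Linux-only; TCGETS, TCSETS and ECHO come from
// the same unix package.
package main

import (
	"bufio"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fd := int(os.Stdin.Fd())

	saved, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		panic(err)
	}
	noEcho := *saved
	noEcho.Lflag &^= unix.ECHO
	if err := unix.IoctlSetTermios(fd, unix.TCSETS, &noEcho); err != nil {
		panic(err)
	}
	defer unix.IoctlSetTermios(fd, unix.TCSETS, saved) // always restore

	fmt.Print("password: ")
	line, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	fmt.Printf("\nread %d bytes\n", len(line))
}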
( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1427,13 +1452,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1452,11 +1485,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1752,6 +1789,7 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1788,6 +1826,8 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 97dc03991a..4ffc8d29c9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1394,6 +1404,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1401,6 +1414,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1408,7 +1431,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1428,13 +1453,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1453,11 +1486,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 
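// Usage sketch (not part of the diff) for two constant families added across
// the zerrors files above: XATTR_CREATE makes Setxattr fail instead of
// overwriting an existing attribute, and UMOUNT_NOFOLLOW refuses to follow a
// symlink at the mount point. Paths and the attribute name are placeholders;
// both calls are Linux-only and usually need elevated privileges.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Create-only: EEXIST if user.checksum is already set on the file.
	err := unix.Setxattr("/tmp/data.bin", "user.checksum", []byte("abc123"), unix.XATTR_CREATE)
	fmt.Println("setxattr:", err)

	// Unmount without following a symlinked mount point.
	err = unix.Unmount("/mnt/scratch", unix.UMOUNT_NOFOLLOW)
	fmt.Println("unmount:", err)
}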
SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1753,6 +1790,7 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1789,6 +1827,8 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 52f08f227b..f4b178ef10 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1055,6 +1055,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1398,6 +1408,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1405,6 +1418,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1412,7 +1435,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1432,13 +1457,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1457,11 +1490,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1757,6 +1794,7 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1793,6 +1831,8 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 0cfe25b214..495f13b61f 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1383,6 +1393,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1390,6 +1403,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1397,7 +1420,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1417,13 +1442,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1442,11 +1475,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1742,6 +1779,7 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1778,6 +1816,8 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 264a136a33..59651e4156 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1395,6 +1405,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + 
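// Illustrative sketch (not part of the diff) for the SECCOMP_MODE_* constants
// added above: read the kernel-reported seccomp mode of the current process
// from /proc/self/status and map it back to the constants. Linux-only; the
// parsing here is deliberately minimal.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/proc/self/status")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	mode := -1
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		if strings.HasPrefix(sc.Text(), "Seccomp:") {
			v := strings.TrimSpace(strings.TrimPrefix(sc.Text(), "Seccomp:"))
			mode, _ = strconv.Atoi(v)
			break
		}
	}

	switch mode {
	case unix.SECCOMP_MODE_DISABLED:
		fmt.Println("seccomp: disabled")
	case unix.SECCOMP_MODE_STRICT:
		fmt.Println("seccomp: strict")
	case unix.SECCOMP_MODE_FILTER:
		fmt.Println("seccomp: filter (BPF)")
	default:
		fmt.Println("seccomp: unknown")
	}
}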
SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1402,6 +1415,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1409,7 +1432,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1429,13 +1454,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1454,11 +1487,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1756,6 +1793,7 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1793,6 +1831,8 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index bde3efbd5f..a09bf9b181 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1395,6 +1405,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1402,6 +1415,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1409,7 +1432,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 
SIOCGIFBRDADDR = 0x8919 @@ -1429,13 +1454,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1454,11 +1487,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1756,6 +1793,7 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1793,6 +1831,8 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 167c22829c..72a0083c4b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1395,6 +1405,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1402,6 +1415,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1409,7 +1432,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1429,13 +1454,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1454,11 +1487,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 + SIOCWANDEV = 
0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1756,6 +1793,7 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1793,6 +1831,8 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 6c47abf7bc..84c0e3cc1d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1056,6 +1056,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1395,6 +1405,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1402,6 +1415,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1409,7 +1432,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1429,13 +1454,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1454,11 +1487,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1756,6 +1793,7 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1793,6 +1831,8 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 32eccbd809..8e4606e065 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1057,6 +1057,16 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1451,6 +1461,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1458,6 +1471,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1465,7 +1488,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1485,13 +1510,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1510,11 +1543,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1814,6 +1851,7 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1850,6 +1888,8 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4000 XTABS = 0xc00 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index eb944e678f..16ed193116 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1057,6 +1057,16 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1451,6 +1461,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + 
SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1458,6 +1471,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1465,7 +1488,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1485,13 +1510,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1510,11 +1543,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1814,6 +1851,7 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1850,6 +1888,8 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4000 XTABS = 0xc00 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3566349072..bd385f809b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1055,6 +1055,16 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1455,6 +1465,9 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1462,6 +1475,16 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1469,7 +1492,9 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ 
-1489,13 +1514,21 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1514,11 +1547,15 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1814,6 +1851,7 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1850,6 +1888,8 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 51de8489a0..6c0845071a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 19f9fe5629..f34418dd17 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 
uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 47d7d5383f..92b4716efb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 7bc66933db..ec55920437 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 
0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index e563565d41..e6c2bf52af 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 1ee4a5e40f..f77f1d0964 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 80506f5915..d6ce861123 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0a412e1bc8..c0134065aa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index c9751d258e..6b7a291cda 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 3848a49c33..7585277edf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 1e16935de3..987ce8664a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -14,6 +14,21 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -574,21 +589,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 7fc1eb2d98..8111206594 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -662,6 +662,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 60a26eeeaa..075d9c561f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -680,6 +680,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index b994baa3e6..a66c1603b3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -651,6 +651,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index c19c47881c..b3b506a6d3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -659,6 +659,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index c84e4620ca..5c654f552e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -656,6 +656,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 0c75cb937c..3f11fb657b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -661,6 +661,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index c75f75a2c3..1a4ad57e40 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -661,6 +661,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index cfc219f1b1..b3f0f30fd4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -656,6 +656,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 4c285227ca..aeee27e046 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -669,6 +669,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 1b511be223..b8cb2c3b21 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -669,6 +669,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x40045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index b408752d3b..58883f92bd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -686,6 +686,8 @@ type Sigset_t struct { const RNDGETENTCNT = 0x80045200 +const PERF_IOC_FLAG_GROUP = 0x1 + const _SC_PAGESIZE = 0x1e type Termios struct { diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 0f62046748..e77a370550 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -160,7 +160,6 @@ func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { default: panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") } - return } // A LazyDLL implements access to a single DLL. diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go new file mode 100644 index 0000000000..f63e899acb --- /dev/null +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +const ( + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 + MEM_DECOMMIT = 0x00004000 + MEM_RELEASE = 0x00008000 + MEM_RESET = 0x00080000 + MEM_TOP_DOWN = 0x00100000 + MEM_WRITE_WATCH = 0x00200000 + MEM_PHYSICAL = 0x00400000 + MEM_RESET_UNDO = 0x01000000 + MEM_LARGE_PAGES = 0x20000000 + + PAGE_NOACCESS = 0x01 + PAGE_READONLY = 0x02 + PAGE_READWRITE = 0x04 + PAGE_WRITECOPY = 0x08 + PAGE_EXECUTE_READ = 0x20 + PAGE_EXECUTE_READWRITE = 0x40 + PAGE_EXECUTE_WRITECOPY = 0x80 +) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index e439c48ab7..518250e704 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -154,6 +154,9 @@ func NewCallbackCDecl(fn interface{}) uintptr //sys FlushViewOfFile(addr uintptr, length uintptr) (err error) //sys VirtualLock(addr uintptr, length uintptr) (err error) //sys VirtualUnlock(addr uintptr, length uintptr) (err error) +//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc +//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree +//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect //sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile //sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW //sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index b9ee827829..d588e1d033 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -139,6 +139,9 @@ var ( procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procVirtualLock = modkernel32.NewProc("VirtualLock") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") procTransmitFile = modmswsock.NewProc("TransmitFile") procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") @@ -1384,6 +1387,43 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { return } +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualProtect(address 
uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) if r1 == 0 { diff --git a/vendor/golang.org/x/sys/windows/ztypes_windows.go b/vendor/golang.org/x/sys/windows/ztypes_windows.go index a907ff2ce1..c99a3fe5f1 100644 --- a/vendor/golang.org/x/sys/windows/ztypes_windows.go +++ b/vendor/golang.org/x/sys/windows/ztypes_windows.go @@ -165,13 +165,6 @@ const ( PROCESS_QUERY_INFORMATION = 0x00000400 SYNCHRONIZE = 0x00100000 - PAGE_READONLY = 0x02 - PAGE_READWRITE = 0x04 - PAGE_WRITECOPY = 0x08 - PAGE_EXECUTE_READ = 0x20 - PAGE_EXECUTE_READWRITE = 0x40 - PAGE_EXECUTE_WRITECOPY = 0x80 - FILE_MAP_COPY = 0x01 FILE_MAP_WRITE = 0x02 FILE_MAP_READ = 0x04 diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS deleted file mode 100644 index 15167cd746..0000000000 --- a/vendor/golang.org/x/text/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/text/CONTRIBUTING.md b/vendor/golang.org/x/text/CONTRIBUTING.md deleted file mode 100644 index 88dff59bc7..0000000000 --- a/vendor/golang.org/x/text/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS deleted file mode 100644 index 1c4577e968..0000000000 --- a/vendor/golang.org/x/text/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/vendor/golang.org/x/text/README b/vendor/golang.org/x/text/README deleted file mode 100644 index 4826fe8fb6..0000000000 --- a/vendor/golang.org/x/text/README +++ /dev/null @@ -1,23 +0,0 @@ -This repository holds supplementary Go libraries for text processing, many involving Unicode. - -To submit changes to this repository, see http://golang.org/doc/contribute.html. - -To generate the tables in this repository (except for the encoding tables), -run go generate from this directory. By default tables are generated for the -Unicode version in core and the CLDR version defined in -golang.org/x/text/unicode/cldr. - -Running go generate will as a side effect create a DATA subdirectory in this -directory which holds all files that are used as a source for generating the -tables. This directory will also serve as a cache. - -Run - - go test ./... - -from this directory to run all tests. Add the "-tags icu" flag to also run -ICU conformance tests (if available). This requires that you have the correct -ICU version installed on your system. - -TODO: -- updating unversioned source files. \ No newline at end of file diff --git a/vendor/golang.org/x/text/codereview.cfg b/vendor/golang.org/x/text/codereview.cfg deleted file mode 100644 index 3f8b14b64e..0000000000 --- a/vendor/golang.org/x/text/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/text/doc.go b/vendor/golang.org/x/text/doc.go deleted file mode 100644 index a48e2843fc..0000000000 --- a/vendor/golang.org/x/text/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// text is a repository of text-related packages related to internationalization -// (i18n) and localization (l10n), such as character encodings, text -// transformations, and locale-specific text handling. -package text - -// TODO: more documentation on general concepts, such as Transformers, use -// of normalization, etc. diff --git a/vendor/golang.org/x/text/gen.go b/vendor/golang.org/x/text/gen.go deleted file mode 100644 index 79af97e708..0000000000 --- a/vendor/golang.org/x/text/gen.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// gen runs go generate on Unicode- and CLDR-related package in the text -// repositories, taking into account dependencies and versions. -package main - -import ( - "bytes" - "flag" - "fmt" - "go/build" - "go/format" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "runtime" - "strings" - "sync" - "unicode" - - "golang.org/x/text/internal/gen" -) - -var ( - verbose = flag.Bool("v", false, "verbose output") - force = flag.Bool("force", false, "ignore failing dependencies") - doCore = flag.Bool("core", false, "force an update to core") - excludeList = flag.String("exclude", "", - "comma-separated list of packages to exclude") - - // The user can specify a selection of packages to build on the command line. - args []string -) - -func exclude(pkg string) bool { - if len(args) > 0 { - return !contains(args, pkg) - } - return contains(strings.Split(*excludeList, ","), pkg) -} - -// TODO: -// - Better version handling. -// - Generate tables for the core unicode package? -// - Add generation for encodings. 
This requires some retooling here and there. -// - Running repo-wide "long" tests. - -var vprintf = fmt.Printf - -func main() { - gen.Init() - args = flag.Args() - if !*verbose { - // Set vprintf to a no-op. - vprintf = func(string, ...interface{}) (int, error) { return 0, nil } - } - - // TODO: create temporary cache directory to load files and create and set - // a "cache" option if the user did not specify the UNICODE_DIR environment - // variable. This will prevent duplicate downloads and also will enable long - // tests, which really need to be run after each generated package. - - updateCore := *doCore - if gen.UnicodeVersion() != unicode.Version { - fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n", - gen.UnicodeVersion(), - unicode.Version) - // TODO: use collate to compare. Simple comparison will work, though, - // until Unicode reaches version 10. To avoid circular dependencies, we - // could use the NumericWeighter without using package collate using a - // trivial Weighter implementation. - if gen.UnicodeVersion() < unicode.Version && !*force { - os.Exit(2) - } - updateCore = true - } - - var unicode = &dependency{} - if updateCore { - fmt.Printf("Updating core to version %s...\n", gen.UnicodeVersion()) - unicode = generate("unicode") - - // Test some users of the unicode packages, especially the ones that - // keep a mirrored table. These may need to be corrected by hand. - generate("regexp", unicode) - generate("strconv", unicode) // mimics Unicode table - generate("strings", unicode) - generate("testing", unicode) // mimics Unicode table - } - - var ( - cldr = generate("./unicode/cldr", unicode) - language = generate("./language", cldr) - internal = generate("./internal", unicode, language) - norm = generate("./unicode/norm", unicode) - rangetable = generate("./unicode/rangetable", unicode) - cases = generate("./cases", unicode, norm, language, rangetable) - width = generate("./width", unicode) - bidi = generate("./unicode/bidi", unicode, norm, rangetable) - mib = generate("./encoding/internal/identifier", unicode) - _ = generate("./encoding/htmlindex", unicode, language, mib) - _ = generate("./encoding/ianaindex", unicode, language, mib) - _ = generate("./secure/precis", unicode, norm, rangetable, cases, width, bidi) - _ = generate("./currency", unicode, cldr, language, internal) - _ = generate("./internal/number", unicode, cldr, language, internal) - _ = generate("./feature/plural", unicode, cldr, language, internal) - _ = generate("./internal/export/idna", unicode, bidi, norm) - _ = generate("./language/display", unicode, cldr, language, internal) - _ = generate("./collate", unicode, norm, cldr, language, rangetable) - _ = generate("./search", unicode, norm, cldr, language, rangetable) - ) - all.Wait() - - // Copy exported packages to the destination golang.org repo. - copyExported("golang.org/x/net/idna") - - if updateCore { - copyVendored() - } - - if hasErrors { - fmt.Println("FAIL") - os.Exit(1) - } - vprintf("SUCCESS\n") -} - -var ( - all sync.WaitGroup - hasErrors bool -) - -type dependency struct { - sync.WaitGroup - hasErrors bool -} - -func generate(pkg string, deps ...*dependency) *dependency { - var wg dependency - if exclude(pkg) { - return &wg - } - wg.Add(1) - all.Add(1) - go func() { - defer wg.Done() - defer all.Done() - // Wait for dependencies to finish. 
- for _, d := range deps { - d.Wait() - if d.hasErrors && !*force { - fmt.Printf("--- ABORT: %s\n", pkg) - wg.hasErrors = true - return - } - } - vprintf("=== GENERATE %s\n", pkg) - args := []string{"generate"} - if *verbose { - args = append(args, "-v") - } - args = append(args, pkg) - cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...) - w := &bytes.Buffer{} - cmd.Stderr = w - cmd.Stdout = w - if err := cmd.Run(); err != nil { - fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err) - hasErrors = true - wg.hasErrors = true - return - } - - vprintf("=== TEST %s\n", pkg) - args[0] = "test" - cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...) - wt := &bytes.Buffer{} - cmd.Stderr = wt - cmd.Stdout = wt - if err := cmd.Run(); err != nil { - fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err) - hasErrors = true - wg.hasErrors = true - return - } - vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w)) - fmt.Print(wt.String()) - }() - return &wg -} - -// copyExported copies a package in x/text/internal/export to the -// destination repository. -func copyExported(p string) { - copyPackage( - filepath.Join("internal", "export", path.Base(p)), - filepath.Join("..", filepath.FromSlash(p[len("golang.org/x"):])), - "golang.org/x/text/internal/export/"+path.Base(p), - p) -} - -// copyVendored copies packages used by Go core into the vendored directory. -func copyVendored() { - root := filepath.Join(build.Default.GOROOT, filepath.FromSlash("src/vendor/golang_org/x")) - - err := filepath.Walk(root, func(dir string, info os.FileInfo, err error) error { - if err != nil || !info.IsDir() || root == dir { - return err - } - src := dir[len(root)+1:] - const slash = string(filepath.Separator) - if c := strings.Split(src, slash); c[0] == "text" { - // Copy a text repo package from its normal location. - src = strings.Join(c[1:], slash) - } else { - // Copy the vendored package if it exists in the export directory. - src = filepath.Join("internal", "export", filepath.Base(src)) - } - copyPackage(src, dir, "golang.org", "golang_org") - return nil - }) - if err != nil { - fmt.Printf("Seeding directory %s has failed %v:", root, err) - os.Exit(1) - } -} - -// goGenRE is used to remove go:generate lines. -var goGenRE = regexp.MustCompile("//go:generate[^\n]*\n") - -// copyPackage copies relevant files from a directory in x/text to the -// destination package directory. The destination package is assumed to have -// the same name. For each copied file go:generate lines are removed and -// and package comments are rewritten to the new path. -func copyPackage(dirSrc, dirDst, search, replace string) { - err := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error { - base := filepath.Base(file) - if err != nil || info.IsDir() || - !strings.HasSuffix(base, ".go") || - strings.HasSuffix(base, "_test.go") && !strings.HasPrefix(base, "example") || - // Don't process subdirectories. - filepath.Dir(file) != dirSrc { - return nil - } - b, err := ioutil.ReadFile(file) - if err != nil || bytes.Contains(b, []byte("\n// +build ignore")) { - return err - } - // Fix paths. - b = bytes.Replace(b, []byte(search), []byte(replace), -1) - // Remove go:generate lines. - b = goGenRE.ReplaceAllLiteral(b, nil) - comment := "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n" - if *doCore { - comment = "// Code generated by running \"go run gen.go -core\" in golang.org/x/text. 
DO NOT EDIT.\n\n" - } - if !bytes.HasPrefix(b, []byte(comment)) { - b = append([]byte(comment), b...) - } - if b, err = format.Source(b); err != nil { - fmt.Println("Failed to format file:", err) - os.Exit(1) - } - file = filepath.Join(dirDst, base) - vprintf("=== COPY %s\n", file) - return ioutil.WriteFile(file, b, 0666) - }) - if err != nil { - fmt.Println("Copying exported files failed:", err) - os.Exit(1) - } -} - -func contains(a []string, s string) bool { - for _, e := range a { - if s == e { - return true - } - } - return false -} - -func indent(b *bytes.Buffer) string { - return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1) -} diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go deleted file mode 100644 index d7031b6945..0000000000 --- a/vendor/golang.org/x/text/internal/gen/code.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gen - -import ( - "bytes" - "encoding/gob" - "fmt" - "hash" - "hash/fnv" - "io" - "log" - "os" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// This file contains utilities for generating code. - -// TODO: other write methods like: -// - slices, maps, types, etc. - -// CodeWriter is a utility for writing structured code. It computes the content -// hash and size of written content. It ensures there are newlines between -// written code blocks. -type CodeWriter struct { - buf bytes.Buffer - Size int - Hash hash.Hash32 // content hash - gob *gob.Encoder - // For comments we skip the usual one-line separator if they are followed by - // a code block. - skipSep bool -} - -func (w *CodeWriter) Write(p []byte) (n int, err error) { - return w.buf.Write(p) -} - -// NewCodeWriter returns a new CodeWriter. -func NewCodeWriter() *CodeWriter { - h := fnv.New32() - return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} -} - -// WriteGoFile appends the buffer with the total size of all created structures -// and writes it as a Go file to the the given file with the given package name. -func (w *CodeWriter) WriteGoFile(filename, pkg string) { - f, err := os.Create(filename) - if err != nil { - log.Fatalf("Could not create file %s: %v", filename, err) - } - defer f.Close() - if _, err = w.WriteGo(f, pkg); err != nil { - log.Fatalf("Error writing file %s: %v", filename, err) - } -} - -// WriteGo appends the buffer with the total size of all created structures and -// writes it as a Go file to the the given writer with the given package name. -func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { - sz := w.Size - w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) - defer w.buf.Reset() - return WriteGo(out, pkg, w.buf.Bytes()) -} - -func (w *CodeWriter) printf(f string, x ...interface{}) { - fmt.Fprintf(w, f, x...) -} - -func (w *CodeWriter) insertSep() { - if w.skipSep { - w.skipSep = false - return - } - // Use at least two newlines to ensure a blank space between the previous - // block. WriteGoFile will remove extraneous newlines. - w.printf("\n\n") -} - -// WriteComment writes a comment block. All line starts are prefixed with "//". -// Initial empty lines are gobbled. The indentation for the first line is -// stripped from consecutive lines. -func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { - s := fmt.Sprintf(comment, args...) 
- s = strings.Trim(s, "\n") - - // Use at least two newlines to ensure a blank space between the previous - // block. WriteGoFile will remove extraneous newlines. - w.printf("\n\n// ") - w.skipSep = true - - // strip first indent level. - sep := "\n" - for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { - sep += s[:1] - } - - strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) - - w.printf("\n") -} - -func (w *CodeWriter) writeSizeInfo(size int) { - w.printf("// Size: %d bytes\n", size) -} - -// WriteConst writes a constant of the given name and value. -func (w *CodeWriter) WriteConst(name string, x interface{}) { - w.insertSep() - v := reflect.ValueOf(x) - - switch v.Type().Kind() { - case reflect.String: - w.printf("const %s %s = ", name, typeName(x)) - w.WriteString(v.String()) - w.printf("\n") - default: - w.printf("const %s = %#v\n", name, x) - } -} - -// WriteVar writes a variable of the given name and value. -func (w *CodeWriter) WriteVar(name string, x interface{}) { - w.insertSep() - v := reflect.ValueOf(x) - oldSize := w.Size - sz := int(v.Type().Size()) - w.Size += sz - - switch v.Type().Kind() { - case reflect.String: - w.printf("var %s %s = ", name, typeName(x)) - w.WriteString(v.String()) - case reflect.Struct: - w.gob.Encode(x) - fallthrough - case reflect.Slice, reflect.Array: - w.printf("var %s = ", name) - w.writeValue(v) - w.writeSizeInfo(w.Size - oldSize) - default: - w.printf("var %s %s = ", name, typeName(x)) - w.gob.Encode(x) - w.writeValue(v) - w.writeSizeInfo(w.Size - oldSize) - } - w.printf("\n") -} - -func (w *CodeWriter) writeValue(v reflect.Value) { - x := v.Interface() - switch v.Kind() { - case reflect.String: - w.WriteString(v.String()) - case reflect.Array: - // Don't double count: callers of WriteArray count on the size being - // added, so we need to discount it here. - w.Size -= int(v.Type().Size()) - w.writeSlice(x, true) - case reflect.Slice: - w.writeSlice(x, false) - case reflect.Struct: - w.printf("%s{\n", typeName(v.Interface())) - t := v.Type() - for i := 0; i < v.NumField(); i++ { - w.printf("%s: ", t.Field(i).Name) - w.writeValue(v.Field(i)) - w.printf(",\n") - } - w.printf("}") - default: - w.printf("%#v", x) - } -} - -// WriteString writes a string literal. -func (w *CodeWriter) WriteString(s string) { - s = strings.Replace(s, `\`, `\\`, -1) - io.WriteString(w.Hash, s) // content hash - w.Size += len(s) - - const maxInline = 40 - if len(s) <= maxInline { - w.printf("%q", s) - return - } - - // We will render the string as a multi-line string. - const maxWidth = 80 - 4 - len(`"`) - len(`" +`) - - // When starting on its own line, go fmt indents line 2+ an extra level. - n, max := maxWidth, maxWidth-4 - - // As per https://golang.org/issue/18078, the compiler has trouble - // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, - // for large N. We insert redundant, explicit parentheses to work around - // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + - // ... + s127) + etc + (etc + ... + sN). - explicitParens, extraComment := len(s) > 128*1024, "" - if explicitParens { - w.printf(`(`) - extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" - } - - // Print "" +\n, if a string does not start on its own line. 
- b := w.buf.Bytes() - if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { - w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) - n, max = maxWidth, maxWidth - } - - w.printf(`"`) - - for sz, p, nLines := 0, 0, 0; p < len(s); { - var r rune - r, sz = utf8.DecodeRuneInString(s[p:]) - out := s[p : p+sz] - chars := 1 - if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { - switch sz { - case 1: - out = fmt.Sprintf("\\x%02x", s[p]) - case 2, 3: - out = fmt.Sprintf("\\u%04x", r) - case 4: - out = fmt.Sprintf("\\U%08x", r) - } - chars = len(out) - } - if n -= chars; n < 0 { - nLines++ - if explicitParens && nLines&63 == 63 { - w.printf("\") + (\"") - } - w.printf("\" +\n\"") - n = max - len(out) - } - w.printf("%s", out) - p += sz - } - w.printf(`"`) - if explicitParens { - w.printf(`)`) - } -} - -// WriteSlice writes a slice value. -func (w *CodeWriter) WriteSlice(x interface{}) { - w.writeSlice(x, false) -} - -// WriteArray writes an array value. -func (w *CodeWriter) WriteArray(x interface{}) { - w.writeSlice(x, true) -} - -func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { - v := reflect.ValueOf(x) - w.gob.Encode(v.Len()) - w.Size += v.Len() * int(v.Type().Elem().Size()) - name := typeName(x) - if isArray { - name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) - } - if isArray { - w.printf("%s{\n", name) - } else { - w.printf("%s{ // %d elements\n", name, v.Len()) - } - - switch kind := v.Type().Elem().Kind(); kind { - case reflect.String: - for _, s := range x.([]string) { - w.WriteString(s) - w.printf(",\n") - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - // nLine and nBlock are the number of elements per line and block. - nLine, nBlock, format := 8, 64, "%d," - switch kind { - case reflect.Uint8: - format = "%#02x," - case reflect.Uint16: - format = "%#04x," - case reflect.Uint32: - nLine, nBlock, format = 4, 32, "%#08x," - case reflect.Uint, reflect.Uint64: - nLine, nBlock, format = 4, 32, "%#016x," - case reflect.Int8: - nLine = 16 - } - n := nLine - for i := 0; i < v.Len(); i++ { - if i%nBlock == 0 && v.Len() > nBlock { - w.printf("// Entry %X - %X\n", i, i+nBlock-1) - } - x := v.Index(i).Interface() - w.gob.Encode(x) - w.printf(format, x) - if n--; n == 0 { - n = nLine - w.printf("\n") - } - } - w.printf("\n") - case reflect.Struct: - zero := reflect.Zero(v.Type().Elem()).Interface() - for i := 0; i < v.Len(); i++ { - x := v.Index(i).Interface() - w.gob.EncodeValue(v) - if !reflect.DeepEqual(zero, x) { - line := fmt.Sprintf("%#v,\n", x) - line = line[strings.IndexByte(line, '{'):] - w.printf("%d: ", i) - w.printf(line) - } - } - case reflect.Array: - for i := 0; i < v.Len(); i++ { - w.printf("%d: %#v,\n", i, v.Index(i).Interface()) - } - default: - panic("gen: slice elem type not supported") - } - w.printf("}") -} - -// WriteType writes a definition of the type of the given value and returns the -// type name. -func (w *CodeWriter) WriteType(x interface{}) string { - t := reflect.TypeOf(x) - w.printf("type %s struct {\n", t.Name()) - for i := 0; i < t.NumField(); i++ { - w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) - } - w.printf("}\n") - return t.Name() -} - -// typeName returns the name of the go type of x. 
-func typeName(x interface{}) string { - t := reflect.ValueOf(x).Type() - return strings.Replace(fmt.Sprint(t), "main.", "", 1) -} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go deleted file mode 100644 index 2acb0355a2..0000000000 --- a/vendor/golang.org/x/text/internal/gen/gen.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gen contains common code for the various code generation tools in the -// text repository. Its usage ensures consistency between tools. -// -// This package defines command line flags that are common to most generation -// tools. The flags allow for specifying specific Unicode and CLDR versions -// in the public Unicode data repository (http://www.unicode.org/Public). -// -// A local Unicode data mirror can be set through the flag -local or the -// environment variable UNICODE_DIR. The former takes precedence. The local -// directory should follow the same structure as the public repository. -// -// IANA data can also optionally be mirrored by putting it in the iana directory -// rooted at the top of the local mirror. Beware, though, that IANA data is not -// versioned. So it is up to the developer to use the right version. -package gen // import "golang.org/x/text/internal/gen" - -import ( - "bytes" - "flag" - "fmt" - "go/build" - "go/format" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "path" - "path/filepath" - "sync" - "unicode" - - "golang.org/x/text/unicode/cldr" -) - -var ( - url = flag.String("url", - "http://www.unicode.org/Public", - "URL of Unicode database directory") - iana = flag.String("iana", - "http://www.iana.org", - "URL of the IANA repository") - unicodeVersion = flag.String("unicode", - getEnv("UNICODE_VERSION", unicode.Version), - "unicode version to use") - cldrVersion = flag.String("cldr", - getEnv("CLDR_VERSION", cldr.Version), - "cldr version to use") -) - -func getEnv(name, def string) string { - if v := os.Getenv(name); v != "" { - return v - } - return def -} - -// Init performs common initialization for a gen command. It parses the flags -// and sets up the standard logging parameters. -func Init() { - log.SetPrefix("") - log.SetFlags(log.Lshortfile) - flag.Parse() -} - -const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package %s - -` - -// UnicodeVersion reports the requested Unicode version. -func UnicodeVersion() string { - return *unicodeVersion -} - -// UnicodeVersion reports the requested CLDR version. -func CLDRVersion() string { - return *cldrVersion -} - -// IsLocal reports whether data files are available locally. -func IsLocal() bool { - dir, err := localReadmeFile() - if err != nil { - return false - } - if _, err = os.Stat(dir); err != nil { - return false - } - return true -} - -// OpenUCDFile opens the requested UCD file. The file is specified relative to -// the public Unicode root directory. It will call log.Fatal if there are any -// errors. -func OpenUCDFile(file string) io.ReadCloser { - return openUnicode(path.Join(*unicodeVersion, "ucd", file)) -} - -// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there -// are any errors. 
-func OpenCLDRCoreZip() io.ReadCloser { - return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") -} - -// OpenUnicodeFile opens the requested file of the requested category from the -// root of the Unicode data archive. The file is specified relative to the -// public Unicode root directory. If version is "", it will use the default -// Unicode version. It will call log.Fatal if there are any errors. -func OpenUnicodeFile(category, version, file string) io.ReadCloser { - if version == "" { - version = UnicodeVersion() - } - return openUnicode(path.Join(category, version, file)) -} - -// OpenIANAFile opens the requested IANA file. The file is specified relative -// to the IANA root, which is typically either http://www.iana.org or the -// iana directory in the local mirror. It will call log.Fatal if there are any -// errors. -func OpenIANAFile(path string) io.ReadCloser { - return Open(*iana, "iana", path) -} - -var ( - dirMutex sync.Mutex - localDir string -) - -const permissions = 0755 - -func localReadmeFile() (string, error) { - p, err := build.Import("golang.org/x/text", "", build.FindOnly) - if err != nil { - return "", fmt.Errorf("Could not locate package: %v", err) - } - return filepath.Join(p.Dir, "DATA", "README"), nil -} - -func getLocalDir() string { - dirMutex.Lock() - defer dirMutex.Unlock() - - readme, err := localReadmeFile() - if err != nil { - log.Fatal(err) - } - dir := filepath.Dir(readme) - if _, err := os.Stat(readme); err != nil { - if err := os.MkdirAll(dir, permissions); err != nil { - log.Fatalf("Could not create directory: %v", err) - } - ioutil.WriteFile(readme, []byte(readmeTxt), permissions) - } - return dir -} - -const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. - -This directory contains downloaded files used to generate the various tables -in the golang.org/x/text subrepo. - -Note that the language subtag repo (iana/assignments/language-subtag-registry) -and all other times in the iana subdirectory are not versioned and will need -to be periodically manually updated. The easiest way to do this is to remove -the entire iana directory. This is mostly of concern when updating the language -package. -` - -// Open opens subdir/path if a local directory is specified and the file exists, -// where subdir is a directory relative to the local root, or fetches it from -// urlRoot/path otherwise. It will call log.Fatal if there are any errors. -func Open(urlRoot, subdir, path string) io.ReadCloser { - file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) - return open(file, urlRoot, path) -} - -func openUnicode(path string) io.ReadCloser { - file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) - return open(file, *url, path) -} - -// TODO: automatically periodically update non-versioned files. 
- -func open(file, urlRoot, path string) io.ReadCloser { - if f, err := os.Open(file); err == nil { - return f - } - r := get(urlRoot, path) - defer r.Close() - b, err := ioutil.ReadAll(r) - if err != nil { - log.Fatalf("Could not download file: %v", err) - } - os.MkdirAll(filepath.Dir(file), permissions) - if err := ioutil.WriteFile(file, b, permissions); err != nil { - log.Fatalf("Could not create file: %v", err) - } - return ioutil.NopCloser(bytes.NewReader(b)) -} - -func get(root, path string) io.ReadCloser { - url := root + "/" + path - fmt.Printf("Fetching %s...", url) - defer fmt.Println(" done.") - resp, err := http.Get(url) - if err != nil { - log.Fatalf("HTTP GET: %v", err) - } - if resp.StatusCode != 200 { - log.Fatalf("Bad GET status for %q: %q", url, resp.Status) - } - return resp.Body -} - -// TODO: use Write*Version in all applicable packages. - -// WriteUnicodeVersion writes a constant for the Unicode version from which the -// tables are generated. -func WriteUnicodeVersion(w io.Writer) { - fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") - fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) -} - -// WriteCLDRVersion writes a constant for the CLDR version from which the -// tables are generated. -func WriteCLDRVersion(w io.Writer) { - fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") - fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) -} - -// WriteGoFile prepends a standard file comment and package statement to the -// given bytes, applies gofmt, and writes them to a file with the given name. -// It will call log.Fatal if there are any errors. -func WriteGoFile(filename, pkg string, b []byte) { - w, err := os.Create(filename) - if err != nil { - log.Fatalf("Could not create file %s: %v", filename, err) - } - defer w.Close() - if _, err = WriteGo(w, pkg, b); err != nil { - log.Fatalf("Error writing file %s: %v", filename, err) - } -} - -// WriteGo prepends a standard file comment and package statement to the given -// bytes, applies gofmt, and writes them to w. -func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { - src := []byte(fmt.Sprintf(header, pkg)) - src = append(src, b...) - formatted, err := format.Source(src) - if err != nil { - // Print the generated code even in case of an error so that the - // returned error can be meaningfully interpreted. - n, _ = w.Write(src) - return n, err - } - return w.Write(formatted) -} - -// Repackage rewrites a Go file from belonging to package main to belonging to -// the given package. -func Repackage(inFile, outFile, pkg string) { - src, err := ioutil.ReadFile(inFile) - if err != nil { - log.Fatalf("reading %s: %v", inFile, err) - } - const toDelete = "package main\n\n" - i := bytes.Index(src, []byte(toDelete)) - if i < 0 { - log.Fatalf("Could not find %q in %s.", toDelete, inFile) - } - w := &bytes.Buffer{} - w.Write(src[i+len(toDelete):]) - WriteGoFile(outFile, pkg, w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go deleted file mode 100644 index 2382f4d6da..0000000000 --- a/vendor/golang.org/x/text/unicode/cldr/base.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cldr - -import ( - "encoding/xml" - "regexp" - "strconv" -) - -// Elem is implemented by every XML element. -type Elem interface { - setEnclosing(Elem) - setName(string) - enclosing() Elem - - GetCommon() *Common -} - -type hidden struct { - CharData string `xml:",chardata"` - Alias *struct { - Common - Source string `xml:"source,attr"` - Path string `xml:"path,attr"` - } `xml:"alias"` - Def *struct { - Common - Choice string `xml:"choice,attr,omitempty"` - Type string `xml:"type,attr,omitempty"` - } `xml:"default"` -} - -// Common holds several of the most common attributes and sub elements -// of an XML element. -type Common struct { - XMLName xml.Name - name string - enclElem Elem - Type string `xml:"type,attr,omitempty"` - Reference string `xml:"reference,attr,omitempty"` - Alt string `xml:"alt,attr,omitempty"` - ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` - Draft string `xml:"draft,attr,omitempty"` - hidden -} - -// Default returns the default type to select from the enclosed list -// or "" if no default value is specified. -func (e *Common) Default() string { - if e.Def == nil { - return "" - } - if e.Def.Choice != "" { - return e.Def.Choice - } else if e.Def.Type != "" { - // Type is still used by the default element in collation. - return e.Def.Type - } - return "" -} - -// GetCommon returns e. It is provided such that Common implements Elem. -func (e *Common) GetCommon() *Common { - return e -} - -// Data returns the character data accumulated for this element. -func (e *Common) Data() string { - e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) - return e.CharData -} - -func (e *Common) setName(s string) { - e.name = s -} - -func (e *Common) enclosing() Elem { - return e.enclElem -} - -func (e *Common) setEnclosing(en Elem) { - e.enclElem = en -} - -// Escape characters that can be escaped without further escaping the string. -var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) - -// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. -// It assumes the input string is correctly formatted. -func replaceUnicode(s string) string { - if s[1] == '#' { - r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) - return string(r) - } - r, _, _, _ := strconv.UnquoteChar(s, 0) - return string(r) -} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go deleted file mode 100644 index 2197f8ac26..0000000000 --- a/vendor/golang.org/x/text/unicode/cldr/cldr.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run makexml.go -output xml.go - -// Package cldr provides a parser for LDML and related XML formats. -// This package is intended to be used by the table generation tools -// for the various internationalization-related packages. -// As the XML types are generated from the CLDR DTD, and as the CLDR standard -// is periodically amended, this package may change considerably over time. -// This mostly means that data may appear and disappear between versions. -// That is, old code should keep compiling for newer versions, but data -// may have moved or changed. -// CLDR version 22 is the first version supported by this package. -// Older versions may not work. 
-package cldr // import "golang.org/x/text/unicode/cldr" - -import ( - "fmt" - "sort" -) - -// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. -type CLDR struct { - parent map[string][]string - locale map[string]*LDML - resolved map[string]*LDML - bcp47 *LDMLBCP47 - supp *SupplementalData -} - -func makeCLDR() *CLDR { - return &CLDR{ - parent: make(map[string][]string), - locale: make(map[string]*LDML), - resolved: make(map[string]*LDML), - bcp47: &LDMLBCP47{}, - supp: &SupplementalData{}, - } -} - -// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. -func (cldr *CLDR) BCP47() *LDMLBCP47 { - return nil -} - -// Draft indicates the draft level of an element. -type Draft int - -const ( - Approved Draft = iota - Contributed - Provisional - Unconfirmed -) - -var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} - -// ParseDraft returns the Draft value corresponding to the given string. The -// empty string corresponds to Approved. -func ParseDraft(level string) (Draft, error) { - if level == "" { - return Approved, nil - } - for i, s := range drafts { - if level == s { - return Unconfirmed - Draft(i), nil - } - } - return Approved, fmt.Errorf("cldr: unknown draft level %q", level) -} - -func (d Draft) String() string { - return drafts[len(drafts)-1-int(d)] -} - -// SetDraftLevel sets which draft levels to include in the evaluated LDML. -// Any draft element for which the draft level is higher than lev will be excluded. -// If multiple draft levels are available for a single element, the one with the -// lowest draft level will be selected, unless preferDraft is true, in which case -// the highest draft will be chosen. -// It is assumed that the underlying LDML is canonicalized. -func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { - // TODO: implement - cldr.resolved = make(map[string]*LDML) -} - -// RawLDML returns the LDML XML for id in unresolved form. -// id must be one of the strings returned by Locales. -func (cldr *CLDR) RawLDML(loc string) *LDML { - return cldr.locale[loc] -} - -// LDML returns the fully resolved LDML XML for loc, which must be one of -// the strings returned by Locales. -func (cldr *CLDR) LDML(loc string) (*LDML, error) { - return cldr.resolve(loc) -} - -// Supplemental returns the parsed supplemental data. If no such data was parsed, -// nil is returned. -func (cldr *CLDR) Supplemental() *SupplementalData { - return cldr.supp -} - -// Locales returns the locales for which there exist files. -// Valid sublocales for which there is no file are not included. -// The root locale is always sorted first. -func (cldr *CLDR) Locales() []string { - loc := []string{"root"} - hasRoot := false - for l, _ := range cldr.locale { - if l == "root" { - hasRoot = true - continue - } - loc = append(loc, l) - } - sort.Strings(loc[1:]) - if !hasRoot { - return loc[1:] - } - return loc -} - -// Get fills in the fields of x based on the XPath path. -func Get(e Elem, path string) (res Elem, err error) { - return walkXPath(e, path) -} diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go deleted file mode 100644 index 80ee28d795..0000000000 --- a/vendor/golang.org/x/text/unicode/cldr/collate.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cldr - -import ( - "bufio" - "encoding/xml" - "errors" - "fmt" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// RuleProcessor can be passed to Collator's Process method, which -// parses the rules and calls the respective method for each rule found. -type RuleProcessor interface { - Reset(anchor string, before int) error - Insert(level int, str, context, extend string) error - Index(id string) -} - -const ( - // cldrIndex is a Unicode-reserved sentinel value used to mark the start - // of a grouping within an index. - // We ignore any rule that starts with this rune. - // See http://unicode.org/reports/tr35/#Collation_Elements for details. - cldrIndex = "\uFDD0" - - // specialAnchor is the format in which to represent logical reset positions, - // such as "first tertiary ignorable". - specialAnchor = "<%s/>" -) - -// Process parses the rules for the tailorings of this collation -// and calls the respective methods of p for each rule found. -func (c Collation) Process(p RuleProcessor) (err error) { - if len(c.Cr) > 0 { - if len(c.Cr) > 1 { - return fmt.Errorf("multiple cr elements, want 0 or 1") - } - return processRules(p, c.Cr[0].Data()) - } - if c.Rules.Any != nil { - return c.processXML(p) - } - return errors.New("no tailoring data") -} - -// processRules parses rules in the Collation Rule Syntax defined in -// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. -func processRules(p RuleProcessor, s string) (err error) { - chk := func(s string, e error) string { - if err == nil { - err = e - } - return s - } - i := 0 // Save the line number for use after the loop. - scanner := bufio.NewScanner(strings.NewReader(s)) - for ; scanner.Scan() && err == nil; i++ { - for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { - level := 5 - var ch byte - switch ch, s = s[0], s[1:]; ch { - case '&': // followed by or '[' ']' - if s = skipSpace(s); consume(&s, '[') { - s = chk(parseSpecialAnchor(p, s)) - } else { - s = chk(parseAnchor(p, 0, s)) - } - case '<': // sort relation '<'{1,4}, optionally followed by '*'. - for level = 1; consume(&s, '<'); level++ { - } - if level > 4 { - err = fmt.Errorf("level %d > 4", level) - } - fallthrough - case '=': // identity relation, optionally followed by *. - if consume(&s, '*') { - s = chk(parseSequence(p, level, s)) - } else { - s = chk(parseOrder(p, level, s)) - } - default: - chk("", fmt.Errorf("illegal operator %q", ch)) - break - } - } - } - if chk("", scanner.Err()); err != nil { - return fmt.Errorf("%d: %v", i, err) - } - return nil -} - -// parseSpecialAnchor parses the anchor syntax which is either of the form -// ['before' ] -// or -// [